text
stringlengths
4
1.02M
meta
dict
from pygame.constants import RLEACCEL

from thorpy.elements.clickable import Clickable
from thorpy._utils.images import load_image, change_color_on_img_ip
from thorpy.miscgui import constants, functions, style, painterstyle


class Checker(Clickable):
    """Checkable check or radio box."""

    def __init__(self, text="", elements=None, normal_params=None,
                 press_params=None, value=False, namestyle=None,
                 type_="checkbox", check_img=None):
        """Checkable check or radio box.

        <text>: text before the box.
        <value>: True for checked, False for not checked.
        <type_>: can be either 'checkbox' or 'radio'.
        <check_img>: if not None, define the image used for the box.
        """
        namestyle = style.STYLE_INSERTER_NAME if namestyle is None else namestyle
        super(Checker, self).__init__("", elements, normal_params, press_params)
        if value:
            self._checked = value
        else:
            self._checked = False
        self._name_element = self._get_name_element(text, namestyle)
        self.add_elements(list([self._name_element]))
        self._type = type_
        painter = self._gen_painter()
        self.set_painter(painter, False)
        self._check_img = self._get_check_img(check_img)
        # Hover state redraws the box with a dedicated highlight color.
        params = {"size": style.CHECK_SIZE,
                  "color": style.COLOR_HOVER_CHECK,
                  "pressed": True}
        if self._type == "checkbox":
            painter_class = painterstyle.CHECKBOX_PAINTER
        elif self._type == "radio":
            painter_class = painterstyle.RADIO_PAINTER
        self.normal_params.polite_set(
            "params hover",
            {"painter": painter_class, "params": params})
        self.normal_params.polite_set("typ hover", "redraw")

    def _get_check_img(self, check_img, colorkey=style.CHECKBOX_IMG_COLORKEY):
        """check_img can either be a path or a pygame Surface.

        A falsy value selects the default image for the current box type,
        loads it and applies the colorkey; an explicit Surface/path is
        returned untouched.
        """
        if not check_img:
            if self._type == "checkbox":
                check_img = style.CHECKBOX_IMG
            elif self._type == "radio":
                check_img = style.RADIO_IMG
            check_img = load_image(check_img)
            check_img.set_colorkey(colorkey, RLEACCEL)
            return check_img
        else:
            return check_img

    def _reaction_press(self, pygame_event):
        # Toggle the box only when the click actually put us in PRESSED state.
        Clickable._reaction_press(self, pygame_event)
        if self.current_state_key == constants.STATE_PRESSED:
            self.check()

    def set_check_img(self, path, color=constants.BLACK,
                      color_src=constants.BLACK, colorkey=constants.WHITE):
        """Replace the check image, optionally recoloring color_src to color."""
        img = self._get_check_img(path, colorkey=colorkey)
        if color != constants.BLACK:
            img = change_color_on_img_ip(img, color_src, color)
        self._check_img = img

    def _gen_painter(self):
        """Return the default painter for the current box type."""
        if self._type == "checkbox":
            return functions.obtain_valid_painter(
                painterstyle.CHECKBOX_PAINTER,
                color=style.DEF_COLOR2,
                size=style.CHECK_SIZE,
                pressed=True)
        elif self._type == "radio":
            return functions.obtain_valid_painter(
                painterstyle.RADIO_PAINTER,
                size=style.CHECK_SIZE,
                pressed=True)

    def finish(self):
        Clickable.finish(self)
        self._refresh_pos()
        # Clicking the label toggles the box too.
        self._name_element.user_func = self.check

    def check(self):
        """Check, blit and update element."""
        self._checked = not self._checked
        self.unblit()
        self.blit()
##        self.transp_blit()
        self.update()

    def solo_blit(self):
        # Draw the box, then overlay the check image centered on it if checked.
        Clickable.solo_blit(self)
        if self._checked:
            clip = self.get_clip()
            r = self._check_img.get_rect()
            r.center = clip.center
            self.surface.blit(self._check_img, r)

    def _get_name_element(self, name, namestyle):
        """Build the clickable text label displayed next to the box."""
        painter = functions.obtain_valid_painter(
            painterstyle.CHECKER_NAME_PAINTER,
            size=style.SIZE)
        el = Clickable(name)
        el.set_painter(painter)
        if namestyle:
            el.set_style(namestyle)
        el.finish()
        return el

    def unblit(self, rect=None):
        self._name_element.unblit(rect)
        Clickable.unblit(self, rect)

    def transp_blit(self):
        # Re-blit through the oldest ancestor so transparency composes right.
        a = self.get_oldest_children_ancester()
        r = self.get_storer_rect()
        a.unblit(r)
        a.partial_blit(None, r)

    def _refresh_pos(self):
        # Place the label to the left of the box (5 px gap), vertically centered.
        l = self.get_fus_topleft()[0]
        (x, y) = self.get_fus_center()
        l -= self._name_element.get_fus_size()[0] + 5
        self._name_element.set_center((None, y))
        self._name_element.set_topleft((l, None))

    def get_storer_rect(self):
        return self.get_family_rect(constants.STATE_NORMAL)

    def get_value(self):
        """Return True if the box is currently checked."""
        return self._checked

    def set_font_color(self, color, state=None, center_title=True):
        """set font color for a given state"""
        Clickable.set_font_color(self, color, state, center_title)
        self._name_element.set_font_color(color, state, center_title)

    def set_font_size(self, size, state=None, center_title=True):
        """set font size for a given state"""
        Clickable.set_font_size(self, size, state, center_title)
        self._name_element.set_font_size(size, state, center_title)

    def set_font(self, fontname, state=None, center_title=True):
        """set font for a given state"""
        # BUGFIX: this previously called Element.set_font (Element is not
        # imported here -> NameError) and a stray set_hovered_states call.
        # Forward to the base class and the label, like the other setters.
        Clickable.set_font(self, fontname, state, center_title)
        self._name_element.set_font(fontname, state, center_title)

    def set_font_effects(self, biu, state=None, center=True, preserve=False):
        """biu = tuple : (bold, italic, underline)"""
        # BUGFIX: was 'CLickable.set_font_effects(self, bio, ...)' -- both
        # names were typos and raised NameError.
        Clickable.set_font_effects(self, biu, state, center, preserve)
        self._name_element.set_font_effects(biu, state, center, preserve)

    def get_help_rect(self):
        return self.get_family_rect()
{ "content_hash": "06d8fff3a4c38d64dbefebd71c325ec5", "timestamp": "", "source": "github", "line_count": 165, "max_line_length": 88, "avg_line_length": 37.17575757575757, "alnum_prop": 0.5787414411477013, "repo_name": "YannThorimbert/ThorPy-1.4.3", "id": "b4d43424091dab413656694008050f72f75c8464", "size": "6134", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "thorpy/elements/checker.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "401722" } ], "symlink_target": "" }
""" Django settings for cpof project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '%+6u3k73cj_s06!t)$=j24%bd!4(vouajz%v&-*m!)_g2#64h!' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django_admin_bootstrapped', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'xadmin', 'crispy_forms', 'cpof.pools', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'cpof.urls' WSGI_APPLICATION = 'cpof.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'openflow', 'USER': 'root', 'PASSWORD': '', 'HOST': '127.0.0.1', 'PORT': '3306', #'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'pt_BR' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ 
STATIC_URL = '/static/'
{ "content_hash": "31e00b7a0ec49c82d5e220cf05c01c4f", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 71, "avg_line_length": 23.81111111111111, "alnum_prop": 0.7022865142323845, "repo_name": "rafaelsilvag/Rypace", "id": "9feb2c1cd605a93160bb9acacd00c9bcdbb06f51", "size": "2143", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ryp-web/cpof/settings.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "24877" }, { "name": "Shell", "bytes": "127" } ], "symlink_target": "" }
import datetime

from app import db


class BucketList(db.Model):
    """A user's bucket list, owning a dynamic collection of Item rows."""

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(100), unique=True)
    description = db.Column(db.Text, nullable=True)
    interests = db.Column(db.String(120), nullable=True)
    # BUGFIX: pass the callable, not utcnow() -- calling it here evaluated the
    # timestamp once at import time, stamping every row with the same value.
    date_created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    date_modified = db.Column(db.DateTime)
    created_by = db.Column(db.Integer, db.ForeignKey('user.id'),
                           nullable=False)
    items = db.relationship('Item', backref='bucket_list_items',
                            lazy='dynamic')

    def __repr__(self):
        return "<Bucketlist {}>".format(self.name)


class Item(db.Model):
    """A single goal inside a bucket list."""

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(100), unique=True)
    description = db.Column(db.Text)
    status = db.Column(db.Text)
    date_accomplished = db.Column(db.DateTime)
    # BUGFIX: same import-time-default bug as BucketList.date_created.
    date_created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    date_modified = db.Column(db.DateTime)
    bucketlists = db.Column(db.Integer, db.ForeignKey('bucket_list.id'),
                            nullable=False)

    def __repr__(self):
        return "<Items {}>".format(self.name)
{ "content_hash": "8c21bb1858662087c392590f6d64d574", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 88, "avg_line_length": 39.733333333333334, "alnum_prop": 0.6879194630872483, "repo_name": "SerryJohns/bucket-list", "id": "790bd96b30fe676462081306a479a799d322e3be", "size": "1192", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/api/v1/models/bucketlist.py", "mode": "33188", "license": "mit", "language": [ { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "45121" } ], "symlink_target": "" }
# Packaging script for panya-paste: Paste templates that generate Panya
# buildout environments. Side effect on import: setup() runs immediately.
from setuptools import setup, find_packages

setup(
    name='panya-paste',
    version='0.1.0',
    description='Python Paste templates creating Panya buildout environments.',
    # Long description is assembled from the repo's top-level docs; these
    # files must exist in the cwd when the sdist is built.
    long_description = open('README.rst', 'r').read() + open('AUTHORS.rst', 'r').read() + open('CHANGELOG.rst', 'r').read(),
    author='Praekelt Foundation',
    author_email='dev@praekelt.com',
    license='BSD',
    url='http://github.com/praekelt/panya-paste',
    packages = find_packages(),
    install_requires = [
        'Cheetah>=2.4.2.1',
        'PasteScript',
        'setuptools',
    ],
    include_package_data=True,
    # Registers the template with `paster create -t panya_project`.
    entry_points = """
    [paste.paster_create_template]
    panya_project=panya_paste.templates:PanyaProjectTemplate
    """,
    classifiers = [
        "Programming Language :: Python",
        "License :: OSI Approved :: BSD License",
        "Development Status :: 4 - Beta",
        "Operating System :: OS Independent",
        "Framework :: Paste",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
    zip_safe=False
)
{ "content_hash": "a886a74d5eccfcea1d2f87bd763d081d", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 124, "avg_line_length": 33.54545454545455, "alnum_prop": 0.6070460704607046, "repo_name": "praekelt/panya-paste", "id": "fbbb1cff14b7a6bbfedd32ce5554ff0b162a0c68", "size": "1107", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "10800" } ], "symlink_target": "" }
# Python 2 example: read a stromx stream definition and pull five frames
# through a camera -> Canny pipeline, printing each frame's dimensions.
from stromx import runtime, cvimgproc, cvsupport

# Register all operator packages with a factory so the XML reader can
# instantiate the operators referenced by camera.xml.
factory = runtime.Factory()
runtime.register(factory)
cvimgproc.register(factory)
cvsupport.register(factory)

stream = runtime.XmlReader().readStream("camera.xml", factory)

stream.start()

# NOTE(review): operator indices assume the layout of camera.xml --
# presumably index 0 is the camera and index 2 the Canny operator; confirm
# against the XML file.
camera = stream.operators()[0]
canny = stream.operators()[2]

for i in range(5):
    # ReadAccess gives synchronized read access to the output image data.
    with canny.getOutputData(1) as data, runtime.ReadAccess(data) as image:
        print "Received image {0}x{1}".format(image.get().width(), image.get().height())
    # Release the outputs so the stream can produce the next frame.
    canny.clearOutputData(1)
    camera.clearOutputData(1)

stream.stop()
stream.join()
{ "content_hash": "167d5f6c0ddb1354ffc322a36cf11ba4", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 75, "avg_line_length": 25.08, "alnum_prop": 0.6698564593301436, "repo_name": "sparsebase/stromx", "id": "bdb4d73a63b1ea4c33f56ed715274a0823010792", "size": "1245", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/example/camera.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "2882239" }, { "name": "CMake", "bytes": "63121" }, { "name": "Python", "bytes": "215111" } ], "symlink_target": "" }
'''
run the CCR scorer on all the groups, including no restriction to a single
group
'''
# Python 2 batch driver: generates one shell command per scorer
# configuration (per group, entity type, slot type, flag combination) and
# runs them through a multiprocessing pool.
import os
import sys
import json
import subprocess
import multiprocessing
from collections import defaultdict

# Collect the distinct query-topic groups; 'kba2012' is excluded from the
# per-group runs.
targets = json.load(open('../../trec-kba-ccr-and-ssf-query-topics-2013-07-16.json'))['targets']
groups = set()
for targ in targets:
    groups.add(targ['group'])
groups.remove('kba2012')
groups = list(groups)
groups.sort()

slot_types = ['Affiliate', 'TopMembers', 'FoundedBy', 'Contact_Meet_Entity',
              'AssociateOf', 'Contact_Meet_PlaceTime', 'AwardsWon',
              'DateOfDeath', 'CauseOfDeath', 'Titles', 'FounderOf',
              'EmployeeOf', 'SignificantOther', 'Children']

primary_commands = []
commands = []

# Shell pipelines (bash redirections) filled in with (flags, cutoff, logname).
ccr_template = "(python -m kba.scorer.ccr %s --cutoff-step %d ../../2013-kba-runs/ ../../trec-kba-ccr-judgments-2013-09-26-expanded-with-ssf-inferred-vitals-plus-len-clean_visible-corrected.before-and-after-cutoff.filter-run.txt | gzip ) >& logs/2013-kba-runs-ccr-%s.log.gz"
ssf_template = "(python -m kba.scorer.ssf %s --cutoff-step %d ../../2013-kba-runs/ ../../trec-kba-ssf-target-events-2013-07-16-expanded-stream-ids.json | gzip ) &> logs/2013-kba-runs-ssf-%s.log.gz"

avg_flag = ''
step_size = 10
# One CCR run per topic group.
for group in groups:
    flags = avg_flag + ' --group %s --topics-path ../../trec-kba-ccr-and-ssf-query-topics-2013-07-16.json ' % group
    log_name = avg_flag + '-' + group
    cmd = ccr_template % (flags, step_size, log_name)
    commands.append(cmd)
    #print cmd

# One CCR run per entity type.
for entity_type in ['PER', 'ORG', 'FAC']:
    flags = avg_flag + ' --entity-type %s --topics-path ../../trec-kba-ccr-and-ssf-query-topics-2013-07-16.json ' % entity_type
    log_name = avg_flag + '-' + entity_type
    cmd = ccr_template % (flags, step_size, log_name)
    commands.append(cmd)
    #print cmd

# One SSF run per slot type.
for slot_type in slot_types:
    flags = avg_flag + ' --slot-type ' + slot_type + ' '
    log_name = avg_flag + '-' + slot_type
    cmd = ssf_template % (flags, step_size, log_name)
    commands.append(cmd)
    #print cmd

# SSF runs per reject flag, and CCR runs for each rating x reject combination.
for reject_flag in ['', '--reject-wikipedia', '--reject-twitter']:
    flags = ' '.join([avg_flag, reject_flag])
    log_name = '-'.join([avg_flag, reject_flag])
    cmd = ssf_template % (flags, step_size, log_name)
    if flags.strip():
        ## only do cmds with at least one flag
        commands.append(cmd)
        #print cmd
    for rating_flag in ['', '--include-useful']:
        flags = ' '.join([avg_flag, rating_flag, reject_flag])
        log_name = '-'.join([avg_flag, rating_flag, reject_flag])
        cmd = ccr_template % (flags, step_size, log_name)
        if flags.strip():
            ## only do cmds with at least one flag
            commands.append(cmd)
            #print cmd

# The "primary" runs use a fine-grained cutoff step and go to the front of
# the queue.
step_size = 1
cmd = ccr_template % ('', step_size, 'primary')
commands.insert(0, cmd)

# NOTE(review): '--require-positiv' looks like a typo for
# '--require-positive' -- verify against kba.scorer.ccr's argument parser
# before changing; the flag is passed through verbatim.
cmd = ccr_template % (' --require-positiv ', step_size, 'primary-req-pos')
commands.insert(0, cmd)

cmd = ssf_template % ('', step_size, 'primary')
commands.insert(0, cmd)

cmd = ssf_template % (' --pooled-only ', step_size, 'primary-pooled-only')
commands.insert(0, cmd)

print len(commands), 'tasks to do'
sys.stdout.flush()

def run(cmd):
    # Runs a full shell pipeline; shell=True is required for the bash
    # redirections built into the templates (commands are self-generated,
    # not untrusted input).
    print cmd
    sys.stdout.flush()
    p = subprocess.Popen(cmd, shell=True, executable="/bin/bash")
    p.wait()
    #sys.exit()

#pool = multiprocessing.Pool(3, maxtasksperchild=1)
#pool.map(run, primary_commands)
#pool.close()
#pool.join()

# Execute everything, eight scorer processes at a time.
pool = multiprocessing.Pool(8, maxtasksperchild=1)
pool.map(run, commands)
pool.close()
pool.join()

'''
ips = open('good-ips').read().splitlines()

base_cmd = "cd /data/trec-kba/users/jrf/KBA/2013/entities/score/src && ("

assignments = defaultdict(set)
i = 0
while commands:
    i += 1
    assignments[ips[i % len(ips)]].add(commands.pop())

counts = defaultdict(int)
for ip in ips:
    counts[len(assignments[ip])] += 1
print counts

remote_cmds = dict()
for ip in ips:
    remote_cmds[ip] = base_cmd + ') && ('.join(list(assignments[ip])) + ')'

#print '\n'.join(map(str, remote_cmds.items()))

for ip, remote_cmd in remote_cmds.items():
    cmd = 'ssh %s "echo \\"%s\\" > jobs.sh; chmod a+x jobs.sh" &' % (ip, remote_cmd)
    print cmd
    os.system(cmd)
'''
{ "content_hash": "bcfde4e68144ba42cde7bc7717c66a5d", "timestamp": "", "source": "github", "line_count": 127, "max_line_length": 275, "avg_line_length": 32.59055118110236, "alnum_prop": 0.6431505194491423, "repo_name": "trec-kba/kba-scorer", "id": "2dd0957479d881f5bf56fa1833ced4612529817c", "size": "4139", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/scripts/run-all-scorings.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "96245" }, { "name": "R", "bytes": "7451" } ], "symlink_target": "" }
"""Base class for concurrency policy.""" from __future__ import absolute_import, division import collections # Namedtuples for management requests. Used by the Message class to communicate # items of work back to the policy. AckRequest = collections.namedtuple( "AckRequest", ["ack_id", "byte_size", "time_to_ack"] ) DropRequest = collections.namedtuple("DropRequest", ["ack_id", "byte_size"]) LeaseRequest = collections.namedtuple("LeaseRequest", ["ack_id", "byte_size"]) ModAckRequest = collections.namedtuple("ModAckRequest", ["ack_id", "seconds"]) NackRequest = collections.namedtuple("NackRequest", ["ack_id", "byte_size"])
{ "content_hash": "7c3c2482252c5b7c4107d1e1284218c2", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 79, "avg_line_length": 33.68421052631579, "alnum_prop": 0.73125, "repo_name": "tseaver/google-cloud-python", "id": "ac1df0af8efff0514c14a3f9c152bbf7e0e8758d", "size": "1237", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "pubsub/google/cloud/pubsub_v1/subscriber/_protocol/requests.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "1094" }, { "name": "Python", "bytes": "30519057" }, { "name": "Shell", "bytes": "9148" } ], "symlink_target": "" }
"""DigitalWhip benchmark driver: detects compilers, builds every benchmark
with every active language/compiler configuration, runs them, and prints a
scorecard comparing languages."""
import colorama
import common
import importlib
import registry
import os
import shutil
import subprocess
import sys
import time

from collections import defaultdict

sys.path.append(os.path.join(common.PROJECT_ROOT_PATH))
import config

EQUAL_PERFORMANCE_EPSILON = 3.0  # in percents

COLOR_READY = '\033[92m'
COLOR_NOT_FOUND = '\033[91m'
COLOR_NOTE = '\033[95m'
COLOR_SUMMARY = '\033[93m'
COLOR_DEFAULT = '\033[0m'

FRAMEWORK_DIR = 'framework'
BENCHMARKS_DIR = 'benchmarks'
BUILD_DIR = 'build'
DATA_DIR = 'data'

BENCHMARKS_PATH = os.path.join(common.PROJECT_ROOT_PATH, BENCHMARKS_DIR)
BUILD_PATH = os.path.join(common.PROJECT_ROOT_PATH, BUILD_DIR)

# configure console colors on Windows
colorama.init()


def get_compiler_version(compiler):
    """Return a human-readable version string for a configured compiler,
    or a placeholder when the language module cannot report one."""
    unknown_version = 'unknown compiler version'
    # Find which language this compiler belongs to.
    language = None
    for language_configuration in registry.languages:
        for build_configuration in language_configuration['build_configurations']:
            if build_configuration['compiler'] == compiler:
                language = language_configuration['language']
                break
    if language is None:
        return unknown_version
    # The language module may expose get_<compiler>_version(path).
    language_module = importlib.import_module(language)
    get_version_func_name = 'get_' + compiler + '_version'
    get_version = getattr(language_module, get_version_func_name, None)
    if get_version is None:
        return unknown_version
    version_string = get_version(config.compilers[compiler])
    if not version_string:
        return unknown_version
    return version_string


def detect_available_compilers():
    """Print a status table for each configured compiler and drop the ones
    whose executable path does not exist. Exits when none remain."""
    show_config_notice = False
    # Iterate over a snapshot because missing compilers are deleted in-loop.
    for compiler, path in list(config.compilers.items()):
        print('----------------------------------------')
        print('| compiler: {}'.format(compiler))
        if os.path.exists(path):
            print('| verinfo : {}'.format(get_compiler_version(compiler)))
            print('| status  : ' + COLOR_READY + 'READY' + COLOR_DEFAULT)
        else:
            print('| status  : ' + COLOR_NOT_FOUND + 'NOT FOUND' + COLOR_DEFAULT)
            del config.compilers[compiler]
            show_config_notice = True
    print('----------------------------------------')
    if not config.compilers:
        print('\nNo compilers found!\nUpdate config.py by specifying paths to the installed compilers')
        sys.exit()
    elif show_config_notice:
        print(COLOR_NOTE + 'NOTE' + COLOR_DEFAULT + ': compiler paths can be specified in config.py script')


def get_options():
    """Parse '--'-prefixed command-line options; currently only --no-build."""
    options = {'skip_build': False}
    for opt in [opt for opt in sys.argv[1:] if opt.startswith('--')]:
        if opt == '--no-build':
            options['skip_build'] = True
        else:
            print('unknown option ' + opt)
            sys.exit()
    return options


def get_benchmarks():
    """Return the benchmarks to run: the ones named on the command line, or
    all of them (simple benchmarks first)."""
    benchmarks = sorted(os.listdir(BENCHMARKS_PATH))
    selected_benchmarks = [b for b in sys.argv[1:] if not b.startswith('--')]
    if selected_benchmarks:
        for b in selected_benchmarks:
            if not b in benchmarks:
                # BUGFIX: message previously lacked the space before the name.
                print('unknown benchmark ' + b)
                sys.exit()
        benchmarks = selected_benchmarks
    else:
        simple_benchmarks = sorted([b for b in benchmarks if is_simple_benchmark(b)])
        complex_benchmarks = sorted([b for b in benchmarks if not is_simple_benchmark(b)])
        benchmarks = simple_benchmarks + complex_benchmarks
    return benchmarks


def get_benchmark_languages(benchmark):
    """List the lang_* subdirectories of a benchmark."""
    directories = os.listdir(os.path.join(BENCHMARKS_PATH, benchmark))
    return sorted([dir for dir in directories if dir.startswith('lang_')])


def is_simple_benchmark(benchmark):
    """A benchmark is 'simple' when it contains a 'simple' tag file."""
    tag_file = os.path.join(BENCHMARKS_PATH, benchmark, 'simple')
    return os.path.exists(tag_file)


def get_language_configuration(language):
    """Return the registry entry for a language, or None if unregistered."""
    return next((c for c in registry.languages if c['language'] == language), None)


def get_language_display_name(language):
    return get_language_configuration(language)['display_name']


def get_active_build_configurations(language_configuration):
    """Yield the build configurations whose compiler was detected."""
    for build_configuration in language_configuration['build_configurations']:
        compiler = build_configuration['compiler']
        if config.compilers.get(compiler) is not None:
            yield build_configuration


def build_benchmark_with_configuration(benchmark, language, build_configuration):
    """Build one benchmark/language/compiler combination by invoking the
    language module's builder function in a child python process."""
    language_module = importlib.import_module(language)
    builder_func_name = build_configuration['builder']
    if builder_func_name not in dir(language_module):
        print('failed to find builder function: ' + builder_func_name)
        sys.exit()
    compiler = build_configuration['compiler']
    compiler_path = config.compilers.get(compiler)
    if compiler_path is None:
        print('unknown compiler name: ' + compiler)
        # BUGFIX: previously fell through with compiler_path == None and
        # crashed below while concatenating the launcher script.
        sys.exit()
    output_dir = os.path.join(BUILD_PATH, benchmark, language, compiler)
    language_dir = os.path.join(BENCHMARKS_PATH, benchmark, language)
    # Raw-string literals keep Windows backslash paths intact.
    build_launcher_script = (
        "from " + language + " import " + builder_func_name + "\n" +
        builder_func_name + "(r'" + language_dir + "', " +
        "r'" + output_dir + "', " +
        "r'" + compiler_path + "')"
    )
    os.makedirs(output_dir)
    sys.stdout.flush()
    exit_code = subprocess.call(['python', '-c', build_launcher_script])
    if exit_code != 0:
        sys.exit()
    executable = os.path.join(output_dir, common.EXECUTABLE_NAME)
    if not os.path.exists(executable):
        print('failed to build benchmark {} with compiler {}'.format(benchmark, compiler))
        sys.exit()


def build_benchmark(benchmark):
    """Build a benchmark for every registered language/active compiler."""
    os.environ['PYTHONPATH'] = FRAMEWORK_DIR
    for language in get_benchmark_languages(benchmark):
        language_configuration = get_language_configuration(language)
        if language_configuration is None:
            print('configuration for {0} is not specified in config.py'.format(language))
            continue
        for build_configuration in get_active_build_configurations(language_configuration):
            build_benchmark_with_configuration(benchmark, language, build_configuration)


def run_benchmark(benchmark, scorecard):
    """Run every built configuration of a benchmark and feed the timings
    (read from the 'timing' file each run writes) into the scorecard."""
    print('---------------------------')
    print('Running ' + benchmark)
    print('---------------------------')

    scorecard.on_benchmark_start(benchmark)
    data_dir = os.path.join(BENCHMARKS_PATH, benchmark, DATA_DIR)

    for language in get_benchmark_languages(benchmark):
        language_configuration = get_language_configuration(language)
        if language_configuration is None:
            continue
        for build_configuration in get_active_build_configurations(language_configuration):
            print(language + '/' + build_configuration['compiler'])
            output_dir = os.path.join(BUILD_PATH, benchmark, language,
                                      build_configuration['compiler'])
            executable = os.path.join(output_dir, common.EXECUTABLE_NAME)
            sys.stdout.flush()
            exit_code = subprocess.call([executable, data_dir])
            if exit_code > 0:
                # validation failure or runtime error
                sys.exit(exit_code)
            benchmark_result = 0
            try:
                with open(os.path.join(output_dir, 'timing')) as f:
                    content = f.readline()
                    benchmark_result = int(content)
            # BUGFIX: also catch ValueError so a corrupt timing file reports
            # the same clear error instead of an unhandled traceback.
            except (OSError, ValueError):
                print('failed to read benchmark timing')
                sys.exit(1)
            elapsed_time = benchmark_result / 1000.0  # ms -> seconds
            print("{:.3f}".format(elapsed_time))
            # A configuration may carry a display 'name'; fall back to the
            # compiler id.
            compiler = build_configuration.get('name')
            if compiler is None:
                compiler = build_configuration['compiler']
            scorecard.register_benchmark_time(language, compiler, elapsed_time)
    scorecard.on_benchmark_end()


class Scorecard:
    """Accumulates per-benchmark timings and converts them into language
    scores and relative-time tables."""

    def __init__(self):
        self.scores = defaultdict(int)
        self.compiler_relative_times_simple = defaultdict(float)
        self.language_relative_times_simple = defaultdict(float)
        self.compiler_relative_times_complex = defaultdict(float)
        self.language_relative_times_complex = defaultdict(float)

    def on_benchmark_start(self, benchmark):
        """Reset per-benchmark state; simple and complex benchmarks feed
        separate accumulators and award different points."""
        simple_benchmark = is_simple_benchmark(benchmark)
        self.compiler_relative_times = self.compiler_relative_times_simple if \
            simple_benchmark else self.compiler_relative_times_complex
        self.language_relative_times = self.language_relative_times_simple if \
            simple_benchmark else self.language_relative_times_complex
        self.compiler_times = {}
        self.language_times = {}
        self.points = [10, 5] if simple_benchmark else [20, 10]

    def register_benchmark_time(self, language, compiler, time):
        """Record one run; a language's time is its best compiler's time."""
        self.compiler_times[compiler] = time
        language_best_time = self.language_times.get(language, sys.float_info.max)
        self.language_times[language] = min(language_best_time, time)

    def on_benchmark_end(self):
        """Fold the benchmark's timings into the running relative times and
        award points by placement (near-ties share a place)."""
        if not self.language_times or not self.points:
            return

        # update compiler relative times
        sorted_compiler_times = sorted(self.compiler_times.items(), key=lambda x: x[1])
        compiler_normalization_coeff = 1.0 / sorted_compiler_times[0][1]
        for (compiler, time) in sorted_compiler_times:
            self.compiler_relative_times[compiler] += time * compiler_normalization_coeff

        # update language relative times
        sorted_language_times = sorted(self.language_times.items(), key=lambda x: x[1])
        language_normalization_coeff = 1.0 / sorted_language_times[0][1]
        for (language, time) in sorted_language_times:
            self.language_relative_times[language] += time * language_normalization_coeff

        # update scores
        cur_place_index = 0
        cur_place_time = 0.0
        prev_earned_points = 0
        print('')
        for i, (language, time) in enumerate(sorted_language_times):
            if i == 0:
                cur_place_time = time
            else:
                # Advance a place only when the gap exceeds the epsilon.
                performance_difference = (time - cur_place_time) / cur_place_time * 100
                if performance_difference > EQUAL_PERFORMANCE_EPSILON:
                    cur_place_index += 1
                    cur_place_time = time
            earned_points = self.points[cur_place_index] if cur_place_index < len(self.points) else 0
            # From third entry on, points are only kept while tied with the
            # previous entry.
            if i >= 2 and earned_points != prev_earned_points:
                earned_points = 0
            prev_earned_points = earned_points
            self.scores[language] += earned_points
            print('{:3} earned {:2} points, relative time {:.2f}'.format(
                get_language_display_name(language), earned_points,
                time * language_normalization_coeff))
        print('')

        self.compiler_relative_times = None
        self.language_relative_times = None
        self.compiler_times = None
        self.language_times = None
        self.points = None

    @staticmethod
    def print_relative_times(relative_times, name_mapper, caption):
        """Print a table of times normalized to the fastest entry; skipped
        when fewer than two entries exist."""
        if len(relative_times) < 2:
            return
        sorted_relative_times = sorted(relative_times.items(), key=lambda x: x[1])
        normalization_coeff = 1.0 / sorted_relative_times[0][1]
        print(caption)
        for (name, relative_time) in sorted_relative_times:
            normalized_time = relative_time * normalization_coeff
            print('{:5} {:.2f}'.format(name_mapper(name), normalized_time))
        print('')

    def print_summary(self):
        """Print relative-time tables and the final placement table."""
        # group languages with the same scores
        sorted_scores = sorted(self.scores.items(), key=lambda x: x[1], reverse=True)
        final_results = []
        prev_score = 0
        for i, (language, score) in enumerate(sorted_scores):
            if i > 0 and score == prev_score:
                final_results[-1][1].append(language)
            else:
                final_results.append((score, [language]))
            prev_score = score

        # print relative times for simple benchmarks
        sys.stdout.write(COLOR_SUMMARY)
        print('----------------------------------------')
        print('Simple benchmarks stats')
        print('----------------------------------------')
        sys.stdout.write(COLOR_DEFAULT)
        Scorecard.print_relative_times(
            self.compiler_relative_times_simple, lambda x: x,
            'Compiler relative times:')
        Scorecard.print_relative_times(
            self.language_relative_times_simple, get_language_display_name,
            'Language relative times:')

        # print relative times for complex benchmarks
        sys.stdout.write(COLOR_SUMMARY)
        print('----------------------------------------')
        print('Complex benchmarks stats')
        print('----------------------------------------')
        sys.stdout.write(COLOR_DEFAULT)
        Scorecard.print_relative_times(
            self.compiler_relative_times_complex, lambda x: x,
            'Compiler relative times:')
        Scorecard.print_relative_times(
            self.language_relative_times_complex, get_language_display_name,
            'Language relative times:')

        # print final scores
        sys.stdout.write(COLOR_SUMMARY)
        print('----------------------------------------')
        print('SUMMARY')
        print('----------------------------------------')
        sys.stdout.write(COLOR_DEFAULT)
        for i, (score, languages) in enumerate(final_results):
            languages_str = ', '.join(map(lambda x: get_language_display_name(x), languages))
            if i == 0:
                winner_suffix = ' DOMINATES!'
                # special case #1: no winner detected
                if len(languages) > 1:
                    winner_suffix = '. THE BORING DRAW, THE BORING UNIVERSE...'
                # special case #2: only single language was benchmarked
                elif len(self.scores) == 1:
                    winner_suffix = '. YOU CAN\'T DOMINATE WHEN YOU ARE ALONE'
                print('Place 1 [{:2} points]. {}{}'.format(score, languages_str, winner_suffix))
            else:
                print('Place {} [{:2} points]. {}'.format(i+1, score, languages_str))


# DigitalWhip main
if __name__ == '__main__':
    print('Detecting available compilers...')
    detect_available_compilers()
    print('')

    options = get_options()
    benchmarks = get_benchmarks()

    if not options['skip_build']:
        print('Building benchmarks source code...')
        if os.path.exists(BUILD_PATH):
            shutil.rmtree(BUILD_PATH)
        os.makedirs(BUILD_PATH)
        for benchmark in benchmarks:
            build_benchmark(benchmark)
        print('')

    scorecard = Scorecard()
    for benchmark in benchmarks:
        run_benchmark(benchmark, scorecard)

    scorecard.print_summary()
{ "content_hash": "68b0b552c3e6b8d84681dbf134d2e410", "timestamp": "", "source": "github", "line_count": 413, "max_line_length": 108, "avg_line_length": 36.300242130750604, "alnum_prop": 0.6087246531483458, "repo_name": "artemalive/DigitalWhip", "id": "af438b64bd2a13023a66ed3a1731e404a9d39a6c", "size": "14992", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "framework/master.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "30" }, { "name": "C++", "bytes": "78985" }, { "name": "D", "bytes": "75014" }, { "name": "Go", "bytes": "60152" }, { "name": "Python", "bytes": "50741" }, { "name": "Shell", "bytes": "29" } ], "symlink_target": "" }
""" NAME shannon.py PURPOSE Methods for calculating various information gains during binary classification. COMMENTS Copied from informationgain.py at https://github.com/CitizenScienceInAstronomyWorkshop/Bureaucracy METHODS shannon(x): expectedInformationGain(p0, M_ll, M_nn) informationGain(p0, M_ll, M_nn, c) BUGS AUTHORS The code in this file was written by Edwin Simpson and Phil Marshall during the Citizen Science in Astronomy Workshop at ASIAA, Taipei, in March 2014, hosted by Meg Schwamb. This file is part of the Space Warps project, which is distributed under the MIT license by the Space Warps Science Team. http://spacewarps.org/ HISTORY 2014-05-21 Incorporated into SWAP code Baumer & Davis (KIPAC) LICENCE The MIT License (MIT) Copyright (c) 2014 CitizenScienceInAstronomyWorkshop Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" #============================================================================ from numpy import log2, ndarray # ---------------------------------------------------------------------------- # The Shannon function: def shannon(x): if isinstance(x, ndarray) == False: if x>0: res = x*log2(x) else: res = 0.0 else: x[x == 0] = 1.0 res = x*log2(x) return res # ---------------------------------------------------------------------------- # The Shannon entropy, S def shannonEntropy(x): if isinstance(x, np.ndarray) == False: if x>0 and (1.-x)>0: res = -x*np.log2(x) - (1.-x)*np.log2(1.-x) else: res = 0.0 else: x[x == 0] = 1.0 res = -x*np.log2(x) x[x == 1] = 0.0 res = res - (1.-x)*np.log2(1.-x) return res # ---------------------------------------------------------------------------- # Expectation value of the information that would be contributed by an # agent defined by confusion matrix M when presented with a subject # having probability p0, over both possible truths and both # possible classifications: def expectedInformationGain(p0, M_ll, M_nn): p1 = 1-p0 I = p0 * (shannon(M_ll) + shannon(1-M_ll)) \ + p1 * (shannon(M_nn) + shannon(1-M_nn)) \ - shannon(M_ll*p0 + (1-M_nn)*p1) \ - shannon((1-M_ll)*p0 + M_nn*p1) return I # ---------------------------------------------------------------------------- # The information gain (relative entropy) contributed by an agent, defined by # confusion matrix M, having classified a subject, that arrived having # probability 'p0', as being 'c' (lens/not = true/false): def informationGain(p0, M_ll, M_nn, lens): p1 = 1-p0 if lens: M_cl = M_ll M_cn = 1-M_nn else: M_cl = 1-M_ll M_cn = M_nn pc = M_cl*p0 + M_cn*p1 p0_c = M_cl/pc p1_c = M_cn/pc I = p0*shannon(p0_c) + p1*shannon(p1_c) return I # ---------------------------------------------------------------------------- # Bayesian update of the probability of a subject by an agent whose # confusion matrix is defined by M def update(p0,M_ll,M_nn,lens): if(lens): M_cl = M_ll M_cn = 1.0 - M_nn else: 
M_cl = 1.0 - M_ll M_cn = M_nn return p0*M_cl/(p0*M_cl+(1.0-p0)*M_cn) # PJM: I re-factored this so that the update eqn was in terms of # M_cl and M_cn (to match my notes). # ---------------------------------------------------------------------------- # The change in subject entropy transmitted by an agent, having classified a # subject, that arrived having probability 'p0' and has new # probability 'p1' def entropyChange(p0, M_ll, M_nn, c): p1 = update(p0,M_ll,M_nn,c) I = mutualInformation(p0,p1) return I # ---------------------------------------------------------------------------- # The mutual information between states with probability 'p0' and 'p1' def mutualInformation(p0,p1): I = shannonEntropy(p0) - shannonEntropy(p1) return I #============================================================================
{ "content_hash": "4fd3c151eb8e5b965fd8ef2747313a36", "timestamp": "", "source": "github", "line_count": 180, "max_line_length": 80, "avg_line_length": 28.544444444444444, "alnum_prop": 0.5642273258077073, "repo_name": "melaniebeck/GZExpress", "id": "4be43dbb6452635cbc6c9be4ea024db63e0cd876", "size": "5216", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "analysis/swap/shannon.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "20158" }, { "name": "Python", "bytes": "288651" } ], "symlink_target": "" }
""" code changes must be documented in *one central place per developer*, not per module. """ import os import datetime from django.conf import settings from lino.utils import i2d, i2t from lino.utils.restify import restify, doc2rst from lino.api import dd, rt blogs = dd.resolve_app('blogs') tickets = dd.resolve_app('tickets') class Blogger(object): def __init__(self, user=None): self.objects_list = [] self.date = None self.user = user self.current_project = None def set_date(self, d): self.date = i2d(d) def set_project(self, project): self.current_project = project def set_user(self, username): self.user = settings.SITE.user_model.objects.get(username=username) def add_object(self, obj): self.objects_list.append(obj) return obj def project(self, ref, title, body, raw_html=False, **kw): if not raw_html: body = restify(doc2rst(body)) kw.update(ref=ref) kw.update(description=body) kw.update(name=title) if self.project: kw.setdefault('parent', self.current_project) return self.add_object(tickets.Project(**kw)) def milestone(self, ref, date, body=None, raw_html=False, **kw): if not raw_html: body = restify(doc2rst(body)) kw.update(ref=ref) #~ kw.update(checkin=checkin) #~ kw.update(description=body) if self.project: kw.setdefault('project', self.current_project) return self.add_object(tickets.Milestone(**kw)) #~ def change(self,time,title,body,module=None,tags=None,issue=None,raw_html=False): def entry(self, ticket, time, title, body, raw_html=False, **kw): if isinstance(time, (basestring, int)): time = i2t(time) kw.update(created=datetime.datetime.combine(self.date, time)) if not raw_html: body = restify(doc2rst(body)) kw.update(user=self.user) kw.update(body=body) kw.update(title=title) kw.update(ticket=ticket) return self.add_object(blogs.Entry(**kw)) def follow(self, prev, time, body, raw_html=False, **kw): return self.entry( prev.ticket, time, prev.title + " (continued)", body, raw_html=raw_html, **kw) def ticket(self, project_ref, title, body, raw_html=False, 
**kw): if not raw_html: body = restify(doc2rst(body)) kw.update(description=body) kw.update(summary=title) project = tickets.Project.get_by_ref(project_ref) #~ try: #~ project=tickets.Project.objects.get(ref=project_ref) #~ except tickets.Project.DoesNotExist,e: #~ raise Exception("No project with reference %r" % project_ref) kw.update(project=project) #~ kw.update(project=tickets.Project.objects.get(ref=project_ref)) return self.add_object(tickets.Ticket(**kw)) def flush(self): for o in self.objects_list: yield o self.objects_list = [] #~ ENTRY_TYPE_CHANGE = 1 #~ ENTRY_TYPE_ISSUE = 2 #~ class Entry(object): #~ raw_html = False #~ def __init__(self,date,title,body,module=None,tags=None,issue=None): # ~ # def __init__(self,module,date,tags,body): #~ self.title = title #~ self.date = i2d(date) #~ self.tags = tags #~ self.body = restify(doc2rst(body)) #~ self.module = module #~ self.issue = issue #~ def __unicode__(self): #~ return "(%s %s) : [%s] %s" % (self.date,self.title,self.tags,self.body) #~ class CodeChange(Entry): pass #~ class Issue(Entry): pass #~ def build_blog_entries(**kw): #~ from lino.modlib.blog import models import Entry #~ global ENTRIES_LIST #~ for e in ENTRIES_LIST: #~ if not e.raw_html: #~ body = restify(doc2rst(e.body)) #~ yield blogs.Entry(created=e.date,title=e.title,e.body,**kw) #~ ENTRIES_LIST = [] #~ blogger = Blogger()
{ "content_hash": "d54f3e3cd99f1a2877255093aeea7152", "timestamp": "", "source": "github", "line_count": 126, "max_line_length": 88, "avg_line_length": 31.928571428571427, "alnum_prop": 0.6030325627641064, "repo_name": "lino-framework/xl", "id": "0c83bd699eafe561b6b9bd894ecd0d129442769a", "size": "4159", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lino_xl/lib/tickets/blogger.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "186625" }, { "name": "HTML", "bytes": "1417287" }, { "name": "JavaScript", "bytes": "1630929" }, { "name": "PHP", "bytes": "40437" }, { "name": "Python", "bytes": "2395471" } ], "symlink_target": "" }
"""Write Your Domain Module Here """
{ "content_hash": "baff22f6e9e9f4e3e838a1020535a863", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 32, "avg_line_length": 18.5, "alnum_prop": 0.6756756756756757, "repo_name": "bufferx/twork", "id": "a61232402da46f5d86639e1176533828f66e8ec2", "size": "37", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scaffold/twork_app/twork_app/domain/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "1494" }, { "name": "Python", "bytes": "56206" }, { "name": "Shell", "bytes": "3773" } ], "symlink_target": "" }
import pytest
import json

from mock.mock import Mock

from insights.core.dr import SkipComponent
from insights.core import filters
from insights.specs import Specs
from insights.core.spec_factory import DatasourceProvider
from insights.specs.datasources.semanage import LocalSpecs, users_count_map_selinux_user

# Sample ``semanage login -l`` output with two logins mapped to staff_u
# and two mapped to unconfined_u.
SEMANGE_LOGIN_LIST_OUTPUT1 = """
Login Name           SELinux User         MLS/MCS Range        Service
%groupb              staff_u              s0-s0:c0.c1023       *
__default__          unconfined_u         s0-s0:c0.c1023       *
testa                staff_u              s0                   *
root                 unconfined_u         s0-s0:c0.c1023       *
"""

# Sample output with no staff_u logins at all (used for the "nothing
# matches the filter" case).
SEMANGE_LOGIN_LIST_OUTPUT2 = """
Login Name           SELinux User         MLS/MCS Range        Service
__default__          unconfined_u         s0-s0:c0.c1023       *
systemu              systemu              s0-s0:c0.c1023       *
root                 unconfined_u         s0-s0:c0.c1023       *
"""

# Path under which the datasource output is archived.
RELATIVE_PATH = 'insights_commands/linux_users_count_map_selinux_user'


def setup_function(func):
    # Start every test with a clean filter registry for Specs.selinux_users
    # (both the resolved cache and the raw filter set).
    if Specs.selinux_users in filters._CACHE:
        del filters._CACHE[Specs.selinux_users]
    if Specs.selinux_users in filters.FILTERS:
        del filters.FILTERS[Specs.selinux_users]

    # Install the filter configuration the test about to run expects.
    if func is test_linux_users_count_map_staff_u:
        filters.add_filter(Specs.selinux_users, ["staff_u"])
    if func is test_linux_users_count_map_more_selinux_users:
        filters.add_filter(Specs.selinux_users, ["staff_u", "unconfined_u"])
    if func is test_linux_users_count_map_staff_u_except:
        filters.add_filter(Specs.selinux_users, [])


def test_linux_users_count_map_staff_u():
    # With only "staff_u" filtered, the datasource should count the two
    # staff_u logins from OUTPUT1.
    selinux_list = Mock()
    selinux_list.content = SEMANGE_LOGIN_LIST_OUTPUT1.splitlines()
    broker = {
        LocalSpecs.selinux_user_mapping: selinux_list
    }
    result = users_count_map_selinux_user(broker)
    assert result is not None
    assert isinstance(result, DatasourceProvider)
    data = {'staff_u': 2}
    expected = DatasourceProvider(json.dumps(data), relative_path=RELATIVE_PATH)
    assert sorted(result.content) == sorted(expected.content)
    assert result.relative_path == expected.relative_path


def test_linux_users_count_map_more_selinux_users():
    # With both "staff_u" and "unconfined_u" filtered, both users are
    # counted (two logins each in OUTPUT1).
    selinux_list = Mock()
    selinux_list.content = SEMANGE_LOGIN_LIST_OUTPUT1.splitlines()
    broker = {
        LocalSpecs.selinux_user_mapping: selinux_list
    }
    result = users_count_map_selinux_user(broker)
    assert result is not None
    assert isinstance(result, DatasourceProvider)
    data = {'staff_u': 2, "unconfined_u": 2}
    expected = DatasourceProvider(json.dumps(data), relative_path=RELATIVE_PATH)
    assert sorted(result.content) == sorted(expected.content)
    assert result.relative_path == expected.relative_path


def test_linux_users_count_map_staff_u_except():
    # With an empty filter set, the datasource has nothing to report and
    # must raise SkipComponent.
    selinux_list = Mock()
    selinux_list.content = SEMANGE_LOGIN_LIST_OUTPUT2.splitlines()
    broker = {
        LocalSpecs.selinux_user_mapping: selinux_list
    }
    with pytest.raises(SkipComponent):
        users_count_map_selinux_user(broker)
{ "content_hash": "d24e8788a1e96331d33008cb673c0ec3", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 88, "avg_line_length": 37.035714285714285, "alnum_prop": 0.6589521054323368, "repo_name": "RedHatInsights/insights-core", "id": "bc5a72fb667c2e585e0991e72b691f97bd5c256d", "size": "3111", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "insights/tests/datasources/test_semanage.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "220" }, { "name": "Python", "bytes": "8219046" }, { "name": "Shell", "bytes": "1754" } ], "symlink_target": "" }
""" Created on Wed May 17 14:40:12 2017 @author: Mihaela Filters the traffic from specific weekdays, between specific hours Input: Structure of lists: dataframe = df weekdays =[day_of_week, ...] =[0-6, ...] Monday = 0, Tuesday = 1, ..., Sunday = 6 times = [[(hour, minute), (hour,minute)], ...] = [[(0-23,0-59), (0-23,0-59)], ...] each sublist contains two tuples first tuple = start time second tuple = end time example: weekdays = [0,1] times = [[(6,00), (7,50)], [(12,00), (12,20)]] get_traffic(weekdays, times) filters the traffic from Mondays and Tuesdays between 6:00-7:50 and 12:00-12:20 """ import numpy as np import pandas as pd from src.misc import paths as path def get_traffic(df, weekdays, times): # transform starting_time column from string to timestamp df['starting_time'] = pd.to_datetime(df['starting_time']) # extract separately date and time (hour, minutes) from timestamp extracted_hour = df['starting_time'].dt.hour extracted_minute = df['starting_time'].dt.minute extracted_day = df['starting_time'].dt.dayofweek # add 'extracted_day' and 'hour_min_tuples' columns to dataset df['extracted_day'] = extracted_day df['hour_min_tuples'] = list(zip(extracted_hour, extracted_minute)) # craete day_mask day_mask = df['extracted_day'].isin(weekdays) # create time_mask time_mask = [False] * df.shape[0] for i in times: time_mask = time_mask | ((df['hour_min_tuples'] >= i[0]) & (df['hour_min_tuples'] <= tuple(np.subtract(i[1],(0,1))))) #create complete_mask complete_mask = day_mask & time_mask #take last added columns out del df['extracted_day'] del df['hour_min_tuples'] #output return(df.loc[complete_mask]) #test # df = pd.DataFrame.from_csv(path.trajectories_training_file, index_col=[0,1,2]) # weekdays = [0] # times = [[(6,00), (7,50)]] # print(get_traffic(df, weekdays, times))
{ "content_hash": "35d3f5353fbee8d9ebfeb4c69314c1b6", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 125, "avg_line_length": 30.690140845070424, "alnum_prop": 0.5759522716842589, "repo_name": "Superchicken1/SambaFlow", "id": "72341a5562cde79f37fc1e623c443bd49fbded88", "size": "2179", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/traffic-prediction/src/misc/get_traffic.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "13313" }, { "name": "HTML", "bytes": "42196" }, { "name": "Java", "bytes": "79242" }, { "name": "JavaScript", "bytes": "34381" }, { "name": "Jupyter Notebook", "bytes": "5129718" }, { "name": "Python", "bytes": "93724" } ], "symlink_target": "" }
import urllib
import json
import os

from flask import Flask
from flask import request
from flask import make_response

import requests

# Flask app should start in global layout
app = Flask(__name__)


@app.route('/webhook', methods=['POST'])
def webhook():
    # Entry point for api.ai (Dialogflow) fulfilment calls: parse the
    # request JSON, delegate to makeWebhookResult(), return JSON.
    req = request.get_json(silent=True, force=True)

    print("Request:")
    print(json.dumps(req, indent=4))

    res = makeWebhookResult(req)

    res = json.dumps(res, indent=4)
    print(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r


def creatDraft(parameters):
    # Create-and-open an eBay listing draft for the requested item.
    # Returns the parsed JSON response on HTTP 200, otherwise the raw
    # status code (callers must be prepared for either).
    # NOTE(review): the bearer token and ngrok host below are hard-coded
    # test credentials; they expire and should come from configuration.
    categories = {'jeans': "11483", 'camera': "31388"}  # known category ids
    condtions = {'new': "1000", 'used': "3000"}  # NOTE(review): unused below

    item = parameters.get("item")
    condtion = parameters.get("condtion")  # NOTE(review): unused below
    brand = parameters.get("brand")
    model = parameters.get("model")

    url = "http://aac9ae49.ngrok.io/experience/consumer_selling/v1/listing_draft/create_and_open?mode=AddItem"

    payload = {
        "requestListing": {
            "item": {
                "title": item
            },
            "categoryId": categories[item],
            "condition": "1000"
        }
    }
    headers = {
        'Authorization': "Bearer v^1.1#i^1#r^1#f^0#I^3#p^3#t^Ul4xMF8yOjBEMUVDODQxMTZBMzQ2QkNFQjM4MUE1MkEyNDREOEIxXzFfMSNFXjUxNg==",
        'X-EBAY-C-ENDUSERCTX': "deviceId=4fe2d65bc464493aa9babd91aa259027,userAgent=Mozilla%2F5.0+%28iPad%3B+CPU+OS+7_0+like+Mac+OS+X%29+AppleWebKit%2F537.51.1+%28KHTML%2C+like+Gecko%29+Version%2F7.0+Mobile%2F11A465+Safari%2F9537.53",
        'X-EBAY-C-MARKETPLACE-ID': "EBAY-US",
        'Content-Type': "application/json",
        'X-EBAY-C-TRACKING': "cguid=049205231500a62960f4f874f775d8e2595427bd,tguid=049229b01504050a19118a10011c1e36595427bd,pageid=2380506,guid=049229b01504050a19118a10011c1e36595427bd",
        'Accept': 'application/json'
    }

    response = requests.post(url, json=payload, headers=headers)
    if response.status_code == 200:
        responseJS = json.loads(response.content)
        return responseJS
    else:
        return response.status_code


def updateItem(draftId, data, startPrice):
    # Update the draft identified by draftId with item specifics, photos,
    # description and the auction start price; returns the parsed response.
    url = "http://aac9ae49.ngrok.io/experience/consumer_selling/v1/listing_draft/" + str(draftId) + "?mode=AddItem"

    payload = {
        "requestListing": {
            "item": {
                "itemSpecific": [
                    {
                        "name": "Brand",
                        "value": [
                            data["brand"]
                        ]
                    },
                    {
                        "name": "Model",
                        "value": [
                            data["model"]
                        ]
                    },
                    {
                        "name": "Series",
                        "value": [
                            "58mm"
                        ]
                    }
                ],
                "title": str(data["condition"]) + " " + str(data["brand"]) + " " + str(data["model"]) + " " + str(data["item"]),
                "picture": [
                    {
                        "url": "https://www.dpreview.com/files/p/articles/2952771527/Images/frontview.jpeg"
                    },
                    {
                        "url": "https://www.dpreview.com/files/p/articles/2952771527/Images/frontview.jpeg"
                    },
                    {
                        "url": "https://www.dpreview.com/files/p/articles/2952771527/Images/frontview.jpeg"
                    },
                    {
                        "url": "https://www.dpreview.com/files/p/articles/2952771527/Images/frontview.jpeg"
                    }
                ],
                "description": "<div style=\"font-family: Arial; font-size:0.8125rem;\"><font face=\"Arial\" size=\"2\">add desc</font><br><br><br></div>"
            },
            "condition": "1000",
            "price": 20,
            "startPrice": startPrice,
            "format": "Auction",
            "listingInfo": {
                "conditionDescription": "New with box",
                "description": "<div style=\"font-family: Arial; font-size:0.8125rem;\"><font face=\"Arial\" size=\"2\">add desc</font><br><br><br></div>"
            },
            "categoryId": "31388",
            "previousShippingType": "SHIP_RECO_0",
            "serviceContextMeta": "{\"restrictedRevise\":false,\"sellerSegment\":\"NEW\",\"recommendedStartPrice\":0.99,\"recommendedBinPrice\":3.99,\"format\":\"FixedPrice\",\"price\":3.99,\"featureQualifiedList\":\"2033,2034\"}"
        },
        "updatedModules": [
            "PHOTOS",
            "ASPECTS_MODULE",
            "CONDITION",
            "DESCRIPTION",
            "PRICE",
            "LISTINGINFO"
        ],
        "userInteractedModules": "ASPECTS_MODULE,PHOTOS,DESCRIPTION,PRICE"
    }

    headers = {
        'Authorization': "Bearer v^1.1#i^1#r^1#f^0#I^3#p^3#t^Ul4xMF8yOjBEMUVDODQxMTZBMzQ2QkNFQjM4MUE1MkEyNDREOEIxXzFfMSNFXjUxNg==",
        'X-EBAY-C-ENDUSERCTX': "deviceId=4fe2d65bc464493aa9babd91aa259027,userAgent=Mozilla%2F5.0+%28iPad%3B+CPU+OS+7_0+like+Mac+OS+X%29+AppleWebKit%2F537.51.1+%28KHTML%2C+like+Gecko%29+Version%2F7.0+Mobile%2F11A465+Safari%2F9537.53",
        'X-EBAY-C-MARKETPLACE-ID': "EBAY-US",
        'Content-Type': "application/json"
    }

    response = requests.put(url, json=payload, headers=headers)
    responseJS = json.loads(response.content)
    return responseJS


def publishItem(draftId, paypal_account):
    # Publish the draft, attaching the seller's PayPal address; returns
    # the parsed response (which carries the new itemId).
    url = "http://aac9ae49.ngrok.io/experience/consumer_selling/v1/listing_draft/" + str(draftId) + "/publish?mode=AddItem"

    payload = {
        "requestListing": {
            "paymentInfo": {
                "paypalEmailAddress": paypal_account
            }
        }
    }
    headers = {
        'Authorization': "Bearer v^1.1#i^1#r^1#f^0#I^3#p^3#t^Ul4xMF8yOjBEMUVDODQxMTZBMzQ2QkNFQjM4MUE1MkEyNDREOEIxXzFfMSNFXjUxNg==",
        'X-EBAY-C-ENDUSERCTX': "deviceId=4fe2d65bc464493aa9babd91aa259027,userAgent=Mozilla%2F5.0+%28iPad%3B+CPU+OS+7_0+like+Mac+OS+X%29+AppleWebKit%2F537.51.1+%28KHTML%2C+like+Gecko%29+Version%2F7.0+Mobile%2F11A465+Safari%2F9537.53",
        'X-EBAY-C-MARKETPLACE-ID': "EBAY-US",
        'Content-Type': "application/json"
    }

    response = requests.post(url, json=payload, headers=headers)
    responseJS = json.loads(response.content)
    return responseJS


def makeWebhookResult(req):
    # Dispatch on the api.ai action and build the fulfilment response.
    # NOTE(review): state is shared between turns via tempData.json on
    # disk -- not safe for concurrent conversations.
    result = req.get("result")
    parameters = result.get("parameters")

    if req.get("result").get("action") == "item.create":
        # Create a draft, remember its id and the recommended start price.
        item = parameters.get("item")
        brand = parameters.get("brand")
        condition = parameters.get("condtion")
        model = parameters.get("model")

        responseJS = creatDraft(parameters)
        #draftId = responseJS["modules"]['SELL_NODE_CTA']['paramList']['draftId']
        draftId = responseJS["meta"]['requestParameters']['draftId']
        startPrice = responseJS["modules"]['PRICE']['bestChanceToSell']['price']['value']

        with open('tempData.json', 'w') as f:
            json.dump({"latestDraftId": draftId, "brand": brand, "condition": condition, "model": model, "item": item, "startPrice": startPrice}, f)

        speech = 'Sweet. We recommend you to sell with 7-day auctions starting at $' + startPrice + ' according to similar items.'\
            'Want to go with that?'
        text = 'Sweet. We recommend you to sell with 7-day auctions starting at $' + startPrice + ' according to similar items.'\
            'Want to go with that??'
        responseData = {}
    elif req.get("result").get("action") == "item.update":
        # The user asked for a custom start price: persist it and push it
        # to the draft.
        with open('tempData.json') as f:
            data = json.load(f)
            draftId = data["latestDraftId"]

        customPrice = parameters.get("number")

        #update the start price
        with open("tempData.json", "r") as f:
            data = json.load(f)
        data["startPrice"] = customPrice
        with open("tempData.json", "w") as f:
            json.dump(data, f)

        updateItemResponse = updateItem(draftId, data, customPrice)

        speech = "Ok! \n We have changed your starting price to $" + str(customPrice) + " . Are you ready to list? "
        text = "Ok! \n We have changed your starting price to $" + str(customPrice) + " . Are you ready to list? "
        responseData = {}
    elif req.get("result").get("action") == "item.publish":
        # Final step: push the latest draft state, publish, and record the
        # itemId -> PayPal mapping.
        paypal_account = parameters.get("paypal_account")
        with open('tempData.json') as f:
            data = json.load(f)
            draftId = data["latestDraftId"]
            customPrice = data["startPrice"]

        updateItemResponse = updateItem(draftId, data, customPrice)
        publishItemResponse = publishItem(draftId, paypal_account)
        itemId = publishItemResponse['meta']['requestParameters']['itemId']

        with open('item_papyal.json', 'a') as f:
            json.dump({itemId: paypal_account}, f)

        speech = 'Congratuations! Your item has been published successfully on eBay with item ID as displayed.'
        text = 'Congratuations! Your item has been published successfully on eBay with item ID ' + itemId + '.'

        # Rich card shown by Actions on Google, linking to the live listing.
        responseData = {
            "google": {
                "expect_user_response": False,
                "rich_response": {
                    "items": [
                        {
                            "simpleResponse": {
                                "textToSpeech": "Congratuations! Your item has been published successfully on eBay. Touch to view."
                            }
                        },
                        {
                            "basicCard": {
                                "title": str(data["condition"]) + " " + str(data["brand"]) + " " + str(data["model"]) + " " + str(data["item"]),
                                "subtitle": "7-day auction start with $" + str(customPrice),
                                "image": {
                                    "url": "https://www.dpreview.com/files/p/articles/2952771527/Images/frontview.jpeg",
                                    "accessibilityText": "Image alternate text"
                                },
                                "buttons": [
                                    {
                                        "title": "View my item",
                                        "openUrlAction": {
                                            "url": "http://www.qa.ebay.com/itm/" + str(itemId) + "?ssPageName=STRK:MESELX:IT&_trksid=p3984.m1555.l2649"
                                        }
                                    }
                                ]
                            }
                        }
                    ]
                }
            }
        }
    elif req.get("result").get("action") == "item.revise":
        # NOTE(review): this branch never assigns `speech`/`text`/
        # `responseData`, so falling through to the prints below raises
        # NameError -- needs a real implementation or an early return.
        pass
    else:
        return {}

    print("Response:")
    print(speech)
    print(text)

    return {
        "speech": speech,
        "displayText": text,
        "data": responseData,
        # "contextOut": [],
        "source": "apiai-onlinestore-shipping"
    }


if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))

    print("Starting app on port %d" % port)

    app.run(debug=True, port=port, host='0.0.0.0')
{ "content_hash": "6428c80d1d857f02d46f0913121b6914", "timestamp": "", "source": "github", "line_count": 289, "max_line_length": 234, "avg_line_length": 37.346020761245676, "alnum_prop": 0.5548040396553322, "repo_name": "zhan4402/list-help-bot", "id": "0cb6d0901cd81aaa058abdddcbd9615a8231b707", "size": "10816", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "10816" } ], "symlink_target": "" }
""" Django settings for sais project. Generated by 'django-admin startproject' using Django 1.8. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'sf-#u-ws#at)lo5)q$0ey(1t3fhm1=yxx)k8ik8^rve8b8gx#e' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Urls para auth from django.core.urlresolvers import reverse_lazy LOGIN_URL = reverse_lazy('login') LOGIN_REDIRECT_URL = reverse_lazy('home') LOGOUT_URL = reverse_lazy('logout') # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'apps.usersProfiles', 'apps.inventarios', 'apps.home', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'sais.urls' MEDIA_ROOT = BASE_DIR + '/media/' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 
'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.media', 'django.core.context_processors.request', ], }, }, ] WSGI_APPLICATION = 'sais.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'es-es' TIME_ZONE = 'America/Guayaquil' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), # '/var/www/static/', )
{ "content_hash": "c41f567eddf83881e9f6f712065608d3", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 71, "avg_line_length": 26.57983193277311, "alnum_prop": 0.6860575403098325, "repo_name": "giojavi04/Test-Sais", "id": "99debe21b9ab536e3f59f8414b6bc6ebeed28420", "size": "3163", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sais/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "360900" }, { "name": "HTML", "bytes": "25463" }, { "name": "JavaScript", "bytes": "1505456" }, { "name": "Python", "bytes": "16417" } ], "symlink_target": "" }
""" ./e04twostage.py http://camlistore.org 1 6 Found 10 urls http://camlistore.org/ frequencies: [('camlistore', 13), ...] ... First integer arg is depth, second is minimum word count. """ from queue import Queue import re from sys import argv from threading import Thread from e01extract import canonicalize, extract def parallel_wordcount(start_url, max_depth, word_length): fetch_queue = Queue() # (crawl_depth, url) fetch_queue.put((0, canonicalize(start_url))) count_queue = Queue() # (url, data) seen_urls = set() func = lambda: fetcher(fetch_queue, max_depth, seen_urls, count_queue) for _ in range(3): Thread(target=func, daemon=True).start() result = [] func = lambda: counter(count_queue, word_length, result) for _ in range(3): Thread(target=func, daemon=True).start() fetch_queue.join() count_queue.join() return result def fetcher(fetch_queue, max_depth, seen_urls, output_queue): while True: depth, url = fetch_queue.get() try: if depth > max_depth: continue # Ignore URLs that are too deep if url in seen_urls: continue # Prevent infinite loops seen_urls.add(url) # GIL :/ try: _, data, found_urls = extract(url) except Exception: continue output_queue.put((url, data)) for found in found_urls: fetch_queue.put((depth + 1, found)) finally: fetch_queue.task_done() def counter(count_queue, word_length, result): while True: url, data = count_queue.get() try: counts = {} for match in re.finditer('\w{%d,100}' % word_length, data): word = match.group(0).lower() counts[word] = counts.get(word, 0) + 1 result.append((url, counts)) # GIL :( finally: count_queue.task_done() def get_popular_words(counts): ranked = sorted(counts.items(), key=lambda x: x[1], reverse=True) return ranked[:10] def print_popular_words(result): print('Found %d urls' % len(result)) for url, counts in result: print('%s frequencies: %s' % (url, get_popular_words(counts))) def main(): result = parallel_wordcount(argv[1], int(argv[2]), int(argv[3])) print_popular_words(result) if __name__ == '__main__': main()
{ "content_hash": "f84e621ec85e8138e9fb66292907d37e", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 75, "avg_line_length": 26.92222222222222, "alnum_prop": 0.5864630623194387, "repo_name": "bslatkin/pycon2014", "id": "89af1caa963a0b213c8804f268d6b47e9290a098", "size": "2447", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "e04twostage.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "38501" }, { "name": "Python", "bytes": "813564" } ], "symlink_target": "" }
import re
from datetime import datetime, timedelta
import xlrd
import uuid
from string import Template
from xml.sax.saxutils import escape
from dateutil.parser import parse as parse_date

from scrapy.spider import BaseSpider
from scrapy.contrib.loader import ItemLoader
from scrapy.http import Request, Response, TextResponse
from scrapy.contrib.loader.processor import TakeFirst, MapCompose, Join
from scrapy.shell import inspect_response
from scrapy import log

from nrc.items import PA_Violation, FeedEntry, FeedEntryTag, PA_DrillingPermit
from nrc.database import NrcDatabase
from nrc.NrcBot import NrcBot
from nrc.AtomPubScraper import AtomPubScraper


class PAViolationScraper (AtomPubScraper):
    """Scrapes PA DEP oil & gas inspection/violation records.

    Builds a date-ranged query URL, parses the returned XML rows into
    PA_Violation items, and groups consecutive rows sharing an
    InspectionID into a single geo-tagged FeedEntry.
    """

    name = 'PAViolationScraper'
    allowed_domains = None
    # Rows for the inspection currently being accumulated; flushed into a
    # feed entry whenever a row with a different InspectionID arrives
    # (see process_row) or at end of input (see finalize_rows).
    # NOTE(review): this is a class attribute shared by all instances —
    # presumably only one spider instance runs at a time; confirm.
    current_inspection = []

    def process_item(self, task):
        """Build the date-windowed query URL for this task and yield the request.

        Window precedence: explicit from_date/to_date in the task, else a
        date_offset (days back from today), else the hard-coded
        Nov 2012 – Dec 2013 default range.
        """
        from_date = parse_date('11-01-2012', fuzzy=1)
        to_date = parse_date('12-31-2013', fuzzy=1)
        if 'from_date' in task and 'to_date' in task:
            from_date = parse_date(task['from_date'], fuzzy=1)
            to_date = parse_date(task['to_date'], fuzzy=1)
        elif 'date_offset' in task:
            to_date = datetime.today()
            from_date = to_date - timedelta(days=int(task['date_offset']))
        # The upstream service filters on a datetime; pin the time-of-day
        # to end-of-day so the "to" date is inclusive.
        date_fmt = "%m/%d/%Y 23:59:59"
        target_url = ("%s&P_DATE_INSPECTED_FROM=%s&P_DATE_INSPECTED_TO=%s"
                      % (task['target_url'],
                         from_date.strftime(date_fmt),
                         to_date.strftime(date_fmt)))
        request = Request (target_url, callback=self.parse_xml)
        self.log('Downloading xml from url %s' % (target_url), log.INFO)
        request.meta['task'] = task
        yield request

    def process_row (self, row, task):
        """Convert one XML result row into a PA_Violation item.

        New items are yielded and also accumulated (HTML-escaped) into
        current_inspection; a change of InspectionID flushes the previous
        group as a feed entry.  Rows whose ViolationID already exists in
        the database only bump the _existing_count stat.
        """
        l=ItemLoader (PA_Violation())
        # County/Municipality columns are limited to 20 chars downstream.
        l.County_in = lambda slist: [s[:20] for s in slist]
        l.Municipality_in = lambda slist: [s[:20] for s in slist]
        l.add_value ('InspectionID', row['INSPECTION_ID'])
        l.add_value ('ViolationID', row['VIOLATION_ID'])
        l.add_value ('EnforcementID', row['ENFORCEMENT_ID'])
        l.add_value ('Operator', row['OPERATOR'])
        l.add_value ('Region', row['REGION'])
        l.add_value ('InspectionDate', self.parse_date(row['INSPECTION_DATE']))
        l.add_value ('InspectionType', row['INSPECTION_TYPE'])
#        l.add_value ('Permit_API', row['PERMIT_NUM'])
        l.add_value ('Permit_API', row['API_PERMIT'])
#        l.add_value ('IsMarcellus', row['MARCELLUS_IND1'])
        l.add_value ('InspectionCategory', row['INSPECTION_CATEGORY'])
        l.add_value ('County', row['COUNTY'])
        l.add_value ('Municipality', row['MUNICIPALITY'])
        l.add_value ('InspectionResult', row['INSPECTION_RESULT_DESCRIPTION'])
        l.add_value ('InspectionComment', row['INSPECTION_COMMENT'])
        l.add_value ('ViolationDate', self.parse_date(row['VIOLATION_DATE']))
        l.add_value ('ViolationCode', row['VIOLATION_CODE'])
        l.add_value ('ViolationType', row['VIOLATION_TYPE'])
        l.add_value ('ViolationComment', row['VIOLATION_COMMENT'])
        l.add_value ('ResolvedDate', self.parse_date(row['RESOLVED_DATE']))
        l.add_value ('EnforcementCode', row['ENFORCEMENT_CODE_DESCRIPTION'])
        l.add_value ('PenaltyFinalStatus', row['PENALTY_FINAL_STATUS_CODE_DESCRIPTION'])
#        l.add_value ('PenaltyDateFinal', self.parse_date(row['COMPLETED_DATE']))
        l.add_value ('PenaltyDateFinal', self.parse_date(row['PENALTY_FINAL_DATE']))
#        l.add_value ('EnforcementDateFinal', self.parse_date(row['FINAL_ENFORCEMENT_DATE']))
        l.add_value ('EnforcementDateFinal', self.parse_date(row['ENFORCEMENT_FINAL_DATE']))
        l.add_value ('PenaltyAmount', row['PENALTY_AMOUNT'])
        l.add_value ('TotalAmountCollected', row['TOTAL_AMOUNT_COLLECTED'])
        l.add_value ('Unconventional', row['UNCONVENTIONAL'])
        item = l.load_item()
#        print item['ViolationID']
        if item['ViolationID']:
            stats = self.crawler.stats
            existing_item = self.db.loadItem (item, {'ViolationID': item['ViolationID']})
            if existing_item:
#                diff = item.contentDiff (existing_item)
#                if diff:
#                    self.send_alert ('PA Permit values in %s have changed since previous scrape\n\n%s' % (item, diff))
#                    self.log ('PA Permit values in %s have changed since previous scrape\n\n%s' % (item, diff), log.ERROR)
#                    stats.inc_value ('_error_count', spider=self)
#                else:
#                    self.log('Skipping existing item %s' % (item), log.DEBUG)
#                    stats.inc_value ('_unchanged_count', spider=self)
                stats.inc_value ('_existing_count', spider=self)
            else:
                stats.inc_value ('_new_count', spider=self)
                yield item
                # Escape every field once here so the Template.substitute
                # calls in the feed/content builders emit safe HTML.
                params = dict(item)
                for f in item.fields:
                    params[f] = escape ("%s" % params.get(f,''))
                # A new InspectionID means the previous group is complete:
                # flush it as a feed entry, then start a fresh group.
                if self.current_inspection and params['InspectionID'] != self.current_inspection[0]['InspectionID']:
                    for entry in self.create_feed_entry (self.current_inspection, task):
                        yield entry
                    self.current_inspection = [params]
                else:
                    self.current_inspection.append (params)

    def finalize_rows (self, task):
        """Flush the last accumulated inspection group, if any."""
        if self.current_inspection:
            for entry in self.create_feed_entry (self.current_inspection, task):
                yield entry

    def create_feed_entry (self, inspection, task):
        """Yield a FeedEntry (plus tags) for one inspection's violation rows.

        `inspection` is the list of escaped row dicts sharing one
        InspectionID; summary fields come from the first row.  Entries
        without a resolvable lat/lng are dropped entirely.
        """
        # create a new feed item
        l=ItemLoader (FeedEntry())
        params = inspection[0]
        params['Operator'] = params['Operator'].title()
        # Deterministic entry id derived from the inspection URL, so
        # re-scrapes produce the same feed entry id.
        url = "%s/%s" % (task['target_url'], params['InspectionID'])
        #feed_entry_id = uuid.uuid3(uuid.NAMESPACE_URL, url.encode('ASCII'))
        feed_entry_id = self.db.uuid3_str(name=url.encode('ASCII'))
        l.add_value ('id', feed_entry_id)
        l.add_value ('title', "PA Permit Violation Issued to %(Operator)s in %(Municipality)s, %(County)s County" % params)
        l.add_value ('incident_datetime', params['InspectionDate'])
        l.add_value ('link', task['about_url'])
        # Trim datetimes to their YYYY-MM-DD prefix for display.
        params['InspectionDate'] = params['InspectionDate'][0:10]
        params['ViolationDate'] = params['ViolationDate'][0:10]
        l.add_value ('summary', self.summary_template().substitute(params))
        l.add_value ('content', self.create_feed_entry_content(inspection))
        location = self.find_location (params)
        if location:
            l.add_value ('lat', location['lat'])
            l.add_value ('lng', location['lng'])
        l.add_value ('source_id', 9)
        feed_item = l.load_item()
        # Only publish entries we can place on the map.
        if feed_item.get('lat') and feed_item.get('lng'):
            yield feed_item
            yield self.create_tag (feed_entry_id, 'PADEP')
            yield self.create_tag (feed_entry_id, 'frack')
            yield self.create_tag (feed_entry_id, 'violation')
            yield self.create_tag (feed_entry_id, 'drilling')
            # NOTE(review): IsMarcellus is never populated above (its
            # add_value is commented out), so this tag presumably never
            # fires — confirm intent.
            if params.get('IsMarcellus') == 'Y':
                yield self.create_tag (feed_entry_id, 'marcellus')

    def create_tag (self, feed_entry_id, tag, comment = ''):
        """Build one FeedEntryTag item linking `tag` to the feed entry."""
        l = ItemLoader (FeedEntryTag())
        l.add_value ('feed_entry_id', feed_entry_id)
        l.add_value ('tag', tag)
        l.add_value ('comment', comment)
        return l.load_item()

    def create_feed_entry_content (self, inspection):
        """Render the HTML body: inspection details + violation/enforcement tables."""
        violations = ''.join([self.violation_template().substitute(v) for v in inspection])
        # De-duplicate enforcements by id: several violation rows may carry
        # the same EnforcementID.
        enforcements = {}
        for e in inspection:
            if e.get('EnforcementID'):
                enforcements[e['EnforcementID']] = e
        enforcements = ''.join([self.enforcement_template().substitute(v) for k,v in enforcements.items()])
        inspection = self.inspection_template().substitute(inspection[0])
        params = {'inspection': inspection,
                  'violations': violations,
                  'enforcements': enforcements}
        return self.content_template().substitute(params)

    def find_location(self, params):
        """Look up lat/lng from the drilling-permit table via the API/permit number.

        Returns {'lat': ..., 'lng': ...} or None if no permit match exists.
        """
        print params
        if params['Permit_API']:
            item = self.db.loadItem(PA_DrillingPermit(), {'Other_Id': params['Permit_API']})
            if item:
                return {'lat': item.get('Latitude_Decimal'),
                        'lng': item.get('Longitude_Decimal')}
        self.log('No location info found for violation id: %s' % (params['ViolationID']), log.WARNING)
        return None

    def item_stored(self, item, id):
        """Hook called after an item is persisted; marks the id as new."""
        self.item_new (id)
        pass

    def summary_template (self):
        """One-line feed summary template."""
        return Template ("$ViolationType violation issued on $InspectionDate to $Operator in $Municipality, $County county. $ViolationCode")

    def content_template (self):
        """Outer HTML layout combining the three rendered sections."""
        return Template (
            """$inspection<br/>
<b>Violation(s)</b>
<table>
$violations
</table>
<b>Enforcement Action(s)</b>
<table>
<tr><th>ID</th><th>Code</th></tr>
$enforcements
</table>
""")

    def inspection_template (self):
        """HTML table of the inspection-level fields (first row of the group)."""
        return Template (
            """<b>Report Details</b>
<table width="100%">
<tr><th>Operator</th><td> $Operator</td></tr>
<tr><th>Violation Type</th><td> $ViolationType</td></tr>
<tr><th>Violation Date</th><td> $ViolationDate</td></tr>
<tr><th>Violation Code</th><td> $ViolationCode</td></tr>
<tr><th>Violation ID</th><td> $ViolationID</td></tr>
<tr><th>Permit API</th><td> $Permit_API</td></tr>
<tr><th>Unconventional</th><td> $Unconventional</td></tr>
<tr><th>County</th><td> $County</td></tr>
<tr><th>Municipality</th><td> $Municipality</td></tr>
<tr><th>Inspection Type</th><td> $InspectionType</td></tr>
<tr><th>Inspection Date</th><td> $InspectionDate</td></tr>
<tr><th>Comments</th><td> $InspectionComment $ViolationComment</td></tr>
</table>
""")

    def violation_template (self):
        """HTML rows for one violation record."""
        return Template (
            """<tr><td>
<strong>ID:</strong> $ViolationID
<strong>Date:</strong> $ViolationDate
<strong>Type:</strong> $ViolationType
</td></tr>
<tr><td>$ViolationCode $ViolationComment</td></tr>
<tr><td></td></tr>
""")

    def enforcement_template (self):
        """HTML row for one enforcement record."""
        return Template (
            """<tr><td>$EnforcementID</td><td>$EnforcementCode</td></tr>
""")
{ "content_hash": "1e7f259c8078e2a1eae8c40571eb8741", "timestamp": "", "source": "github", "line_count": 251, "max_line_length": 140, "avg_line_length": 40.9203187250996, "alnum_prop": 0.6120144095024828, "repo_name": "SkyTruth/scraper", "id": "770e41b02340994b8e79b8d4a42d2b5e38649b74", "size": "10302", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nrc/nrc/spiders/PAViolationScraper.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "2968" }, { "name": "PLpgSQL", "bytes": "940465" }, { "name": "Python", "bytes": "502777" }, { "name": "Shell", "bytes": "4368" } ], "symlink_target": "" }
"""The test for the Template sensor platform.""" from asyncio import Event from datetime import timedelta from unittest.mock import patch import pytest from homeassistant.bootstrap import async_from_config_dict from homeassistant.components import sensor from homeassistant.const import ( ATTR_ENTITY_PICTURE, ATTR_ICON, EVENT_COMPONENT_LOADED, EVENT_HOMEASSISTANT_START, STATE_OFF, STATE_ON, STATE_UNAVAILABLE, STATE_UNKNOWN, ) from homeassistant.core import Context, CoreState, callback from homeassistant.helpers import entity_registry from homeassistant.helpers.template import Template from homeassistant.setup import ATTR_COMPONENT, async_setup_component import homeassistant.util.dt as dt_util from tests.common import async_fire_time_changed TEST_NAME = "sensor.test_template_sensor" @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "It {{ states.sensor.test_state.state }}." } }, }, }, ], ) async def test_template_legacy(hass, start_ha): """Test template.""" assert hass.states.get(TEST_NAME).state == "It ." hass.states.async_set("sensor.test_state", "Works") await hass.async_block_till_done() assert hass.states.get(TEST_NAME).state == "It Works." 
@pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "icon_template": "{% if states.sensor.test_state.state == " "'Works' %}" "mdi:check" "{% endif %}", } }, }, }, ], ) async def test_icon_template(hass, start_ha): """Test icon template.""" assert hass.states.get(TEST_NAME).attributes.get("icon") == "" hass.states.async_set("sensor.test_state", "Works") await hass.async_block_till_done() assert hass.states.get(TEST_NAME).attributes["icon"] == "mdi:check" @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "entity_picture_template": "{% if states.sensor.test_state.state == " "'Works' %}" "/local/sensor.png" "{% endif %}", } }, }, }, ], ) async def test_entity_picture_template(hass, start_ha): """Test entity_picture template.""" assert hass.states.get(TEST_NAME).attributes.get("entity_picture") == "" hass.states.async_set("sensor.test_state", "Works") await hass.async_block_till_done() assert ( hass.states.get(TEST_NAME).attributes["entity_picture"] == "/local/sensor.png" ) @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "attribute,config,expected", [ ( "friendly_name", { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "friendly_name_template": "It {{ states.sensor.test_state.state }}.", } }, }, }, ("It .", "It Works."), ), ( "friendly_name", { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "friendly_name_template": "{{ 'It ' + states.sensor.test_state.state + '.'}}", } }, }, }, (None, "It Works."), 
), ( "friendly_name", { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.fourohfour.state }}", "friendly_name_template": "It {{ states.sensor.test_state.state }}.", } }, }, }, ("It .", "It Works."), ), ( "test_attribute", { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "attribute_templates": { "test_attribute": "It {{ states.sensor.test_state.state }}." }, } }, }, }, ("It .", "It Works."), ), ], ) async def test_friendly_name_template(hass, attribute, expected, start_ha): """Test friendly_name template with an unknown value_template.""" assert hass.states.get(TEST_NAME).attributes.get(attribute) == expected[0] hass.states.async_set("sensor.test_state", "Works") await hass.async_block_till_done() assert hass.states.get(TEST_NAME).attributes[attribute] == expected[1] @pytest.mark.parametrize("count,domain", [(0, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test_template_sensor": {"value_template": "{% if rubbish %}"} }, }, }, { "sensor": { "platform": "template", "sensors": { "test INVALID sensor": { "value_template": "{{ states.sensor.test_state.state }}" } }, }, }, { "sensor": { "platform": "template", "sensors": { "test_template_sensor": {"invalid"}, }, }, }, { "sensor": { "platform": "template", }, }, { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "not_value_template": "{{ states.sensor.test_state.state }}" } }, }, }, { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "test": { "value_template": "{{ states.sensor.test_sensor.state }}", "device_class": "foobarnotreal", } } }, }, }, ], ) async def test_template_syntax_error(hass, start_ha): """Test setup with invalid device_class.""" assert hass.states.async_all("sensor") == [] @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) 
@pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "It {{ states.sensor.test_state" ".attributes.missing }}." } }, }, }, ], ) async def test_template_attribute_missing(hass, start_ha): """Test missing attribute template.""" assert hass.states.get(TEST_NAME).state == STATE_UNAVAILABLE @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test1": { "value_template": "{{ states.sensor.test_sensor.state }}", "device_class": "temperature", }, "test2": { "value_template": "{{ states.sensor.test_sensor.state }}" }, }, }, }, ], ) async def test_setup_valid_device_class(hass, start_ha): """Test setup with valid device_class.""" assert hass.states.get("sensor.test1").attributes["device_class"] == "temperature" assert "device_class" not in hass.states.get("sensor.test2").attributes @pytest.mark.parametrize("load_registries", [False]) async def test_creating_sensor_loads_group(hass): """Test setting up template sensor loads group component first.""" order = [] after_dep_event = Event() async def async_setup_group(hass, config): # Make sure group takes longer to load, so that it won't # be loaded first by chance await after_dep_event.wait() order.append("group") return True async def async_setup_template( hass, config, async_add_entities, discovery_info=None ): order.append("sensor.template") return True async def set_after_dep_event(event): if event.data[ATTR_COMPONENT] == "sensor": after_dep_event.set() hass.bus.async_listen(EVENT_COMPONENT_LOADED, set_after_dep_event) with patch( "homeassistant.components.group.async_setup", new=async_setup_group, ), patch( "homeassistant.components.template.sensor.async_setup_platform", new=async_setup_template, ): await async_from_config_dict( {"sensor": {"platform": "template", "sensors": {}}, "group": {}}, hass ) await hass.async_block_till_done() assert order 
== ["group", "sensor.template"] @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_sensor.state }}", "availability_template": "{{ is_state('sensor.availability_sensor', 'on') }}", } }, }, }, ], ) async def test_available_template_with_entities(hass, start_ha): """Test availability tempalates with values from other entities.""" hass.states.async_set("sensor.availability_sensor", STATE_OFF) # When template returns true.. hass.states.async_set("sensor.availability_sensor", STATE_ON) await hass.async_block_till_done() # Device State should not be unavailable assert hass.states.get(TEST_NAME).state != STATE_UNAVAILABLE # When Availability template returns false hass.states.async_set("sensor.availability_sensor", STATE_OFF) await hass.async_block_till_done() # device state should be unavailable assert hass.states.get(TEST_NAME).state == STATE_UNAVAILABLE @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "invalid_template": { "value_template": "{{ states.sensor.test_sensor.state }}", "attribute_templates": { "test_attribute": "{{ states.sensor.unknown.attributes.picture }}" }, } }, }, }, ], ) async def test_invalid_attribute_template(hass, caplog, start_ha, caplog_setup_text): """Test that errors are logged if rendering template fails.""" hass.states.async_set("sensor.test_sensor", "startup") await hass.async_block_till_done() assert len(hass.states.async_all()) == 2 hass.bus.async_fire(EVENT_HOMEASSISTANT_START) await hass.async_block_till_done() await hass.helpers.entity_component.async_update_entity("sensor.invalid_template") assert "TemplateError" in caplog_setup_text assert "test_attribute" in caplog.text @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", 
[ { "sensor": { "platform": "template", "sensors": { "my_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "availability_template": "{{ x - 12 }}", } }, }, }, ], ) async def test_invalid_availability_template_keeps_component_available( hass, start_ha, caplog_setup_text ): """Test that an invalid availability keeps the device available.""" assert hass.states.get("sensor.my_sensor").state != STATE_UNAVAILABLE assert "UndefinedError: 'x' is undefined" in caplog_setup_text async def test_no_template_match_all(hass, caplog): """Test that we allow static templates.""" hass.states.async_set("sensor.test_sensor", "startup") hass.state = CoreState.not_running await async_setup_component( hass, sensor.DOMAIN, { "sensor": { "platform": "template", "sensors": { "invalid_state": {"value_template": "{{ 1 + 1 }}"}, "invalid_icon": { "value_template": "{{ states.sensor.test_sensor.state }}", "icon_template": "{{ 1 + 1 }}", }, "invalid_entity_picture": { "value_template": "{{ states.sensor.test_sensor.state }}", "entity_picture_template": "{{ 1 + 1 }}", }, "invalid_friendly_name": { "value_template": "{{ states.sensor.test_sensor.state }}", "friendly_name_template": "{{ 1 + 1 }}", }, "invalid_attribute": { "value_template": "{{ states.sensor.test_sensor.state }}", "attribute_templates": {"test_attribute": "{{ 1 + 1 }}"}, }, }, } }, ) await hass.async_block_till_done() assert hass.states.get("sensor.invalid_state").state == "unknown" assert hass.states.get("sensor.invalid_icon").state == "unknown" assert hass.states.get("sensor.invalid_entity_picture").state == "unknown" assert hass.states.get("sensor.invalid_friendly_name").state == "unknown" await hass.async_block_till_done() assert len(hass.states.async_all()) == 6 assert hass.states.get("sensor.invalid_state").state == "unknown" assert hass.states.get("sensor.invalid_icon").state == "unknown" assert hass.states.get("sensor.invalid_entity_picture").state == "unknown" assert 
hass.states.get("sensor.invalid_friendly_name").state == "unknown" assert hass.states.get("sensor.invalid_attribute").state == "unknown" hass.bus.async_fire(EVENT_HOMEASSISTANT_START) await hass.async_block_till_done() assert hass.states.get("sensor.invalid_state").state == "2" assert hass.states.get("sensor.invalid_icon").state == "startup" assert hass.states.get("sensor.invalid_entity_picture").state == "startup" assert hass.states.get("sensor.invalid_friendly_name").state == "startup" assert hass.states.get("sensor.invalid_attribute").state == "startup" hass.states.async_set("sensor.test_sensor", "hello") await hass.async_block_till_done() assert hass.states.get("sensor.invalid_state").state == "2" # Will now process because we have at least one valid template assert hass.states.get("sensor.invalid_icon").state == "hello" assert hass.states.get("sensor.invalid_entity_picture").state == "hello" assert hass.states.get("sensor.invalid_friendly_name").state == "hello" assert hass.states.get("sensor.invalid_attribute").state == "hello" await hass.helpers.entity_component.async_update_entity("sensor.invalid_state") await hass.helpers.entity_component.async_update_entity("sensor.invalid_icon") await hass.helpers.entity_component.async_update_entity( "sensor.invalid_entity_picture" ) await hass.helpers.entity_component.async_update_entity( "sensor.invalid_friendly_name" ) await hass.helpers.entity_component.async_update_entity("sensor.invalid_attribute") assert hass.states.get("sensor.invalid_state").state == "2" assert hass.states.get("sensor.invalid_icon").state == "hello" assert hass.states.get("sensor.invalid_entity_picture").state == "hello" assert hass.states.get("sensor.invalid_friendly_name").state == "hello" assert hass.states.get("sensor.invalid_attribute").state == "hello" @pytest.mark.parametrize("count,domain", [(1, "template")]) @pytest.mark.parametrize( "config", [ { "template": { "unique_id": "group-id", "sensor": {"name": "top-level", "unique_id": 
"sensor-id", "state": "5"}, }, "sensor": { "platform": "template", "sensors": { "test_template_sensor_01": { "unique_id": "not-so-unique-anymore", "value_template": "{{ true }}", }, "test_template_sensor_02": { "unique_id": "not-so-unique-anymore", "value_template": "{{ false }}", }, }, }, }, ], ) async def test_unique_id(hass, start_ha): """Test unique_id option only creates one sensor per id.""" assert len(hass.states.async_all()) == 2 ent_reg = entity_registry.async_get(hass) assert len(ent_reg.entities) == 2 assert ( ent_reg.async_get_entity_id("sensor", "template", "group-id-sensor-id") is not None ) assert ( ent_reg.async_get_entity_id("sensor", "template", "not-so-unique-anymore") is not None ) @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "solar_angle": { "friendly_name": "Sun angle", "unit_of_measurement": "degrees", "value_template": "{{ state_attr('sun.sun', 'elevation') }}", }, "sunrise": { "value_template": "{{ state_attr('sun.sun', 'next_rising') }}" }, }, } }, ], ) async def test_sun_renders_once_per_sensor(hass, start_ha): """Test sun change renders the template only once per sensor.""" now = dt_util.utcnow() hass.states.async_set( "sun.sun", "above_horizon", {"elevation": 45.3, "next_rising": now} ) await hass.async_block_till_done() assert len(hass.states.async_all()) == 3 assert hass.states.get("sensor.solar_angle").state == "45.3" assert hass.states.get("sensor.sunrise").state == str(now) async_render_calls = [] @callback def _record_async_render(self, *args, **kwargs): """Catch async_render.""" async_render_calls.append(self.template) return "mocked" later = dt_util.utcnow() with patch.object(Template, "async_render", _record_async_render): hass.states.async_set("sun.sun", {"elevation": 50, "next_rising": later}) await hass.async_block_till_done() assert hass.states.get("sensor.solar_angle").state == "mocked" assert 
hass.states.get("sensor.sunrise").state == "mocked" assert len(async_render_calls) == 2 assert set(async_render_calls) == { "{{ state_attr('sun.sun', 'elevation') }}", "{{ state_attr('sun.sun', 'next_rising') }}", } @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test": { "value_template": "{{ ((states.sensor.test.state or 0) | int) + 1 }}", }, }, } }, ], ) async def test_self_referencing_sensor_loop(hass, start_ha, caplog_setup_text): """Test a self referencing sensor does not loop forever.""" assert len(hass.states.async_all()) == 1 await hass.async_block_till_done() await hass.async_block_till_done() assert "Template loop detected" in caplog_setup_text assert int(hass.states.get("sensor.test").state) == 2 await hass.async_block_till_done() assert int(hass.states.get("sensor.test").state) == 2 @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test": { "value_template": "{{ ((states.sensor.test.state or 0) | int) + 1 }}", "icon_template": "{% if ((states.sensor.test.state or 0) | int) >= 1 %}mdi:greater{% else %}mdi:less{% endif %}", }, }, } }, ], ) async def test_self_referencing_sensor_with_icon_loop( hass, start_ha, caplog_setup_text ): """Test a self referencing sensor loops forever with a valid self referencing icon.""" assert len(hass.states.async_all()) == 1 await hass.async_block_till_done() await hass.async_block_till_done() assert "Template loop detected" in caplog_setup_text state = hass.states.get("sensor.test") assert int(state.state) == 3 assert state.attributes[ATTR_ICON] == "mdi:greater" await hass.async_block_till_done() state = hass.states.get("sensor.test") assert int(state.state) == 3 @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { 
"test": { "value_template": "{{ ((states.sensor.test.state or 0) | int) + 1 }}", "icon_template": "{% if ((states.sensor.test.state or 0) | int) > 3 %}mdi:greater{% else %}mdi:less{% endif %}", "entity_picture_template": "{% if ((states.sensor.test.state or 0) | int) >= 1 %}bigpic{% else %}smallpic{% endif %}", }, }, } }, ], ) async def test_self_referencing_sensor_with_icon_and_picture_entity_loop( hass, start_ha, caplog_setup_text ): """Test a self referencing sensor loop forevers with a valid self referencing icon.""" assert len(hass.states.async_all()) == 1 await hass.async_block_till_done() await hass.async_block_till_done() assert "Template loop detected" in caplog_setup_text state = hass.states.get("sensor.test") assert int(state.state) == 4 assert state.attributes[ATTR_ICON] == "mdi:less" assert state.attributes[ATTR_ENTITY_PICTURE] == "bigpic" await hass.async_block_till_done() assert int(state.state) == 4 @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test": { "value_template": "{{ 1 }}", "entity_picture_template": "{{ ((states.sensor.test.attributes['entity_picture'] or 0) | int) + 1 }}", }, }, } }, ], ) async def test_self_referencing_entity_picture_loop(hass, start_ha, caplog_setup_text): """Test a self referencing sensor does not loop forever with a looping self referencing entity picture.""" assert len(hass.states.async_all()) == 1 next_time = dt_util.utcnow() + timedelta(seconds=1.2) with patch( "homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time ): async_fire_time_changed(hass, next_time) await hass.async_block_till_done() await hass.async_block_till_done() assert "Template loop detected" in caplog_setup_text state = hass.states.get("sensor.test") assert int(state.state) == 1 assert state.attributes[ATTR_ENTITY_PICTURE] == 2 await hass.async_block_till_done() assert int(state.state) == 1 async def 
test_self_referencing_icon_with_no_loop(hass, caplog): """Test a self referencing icon that does not loop.""" hass.states.async_set("sensor.heartworm_high_80", 10) hass.states.async_set("sensor.heartworm_low_57", 10) hass.states.async_set("sensor.heartworm_avg_64", 10) hass.states.async_set("sensor.heartworm_avg_57", 10) value_template_str = """{% if (states.sensor.heartworm_high_80.state|int >= 10) and (states.sensor.heartworm_low_57.state|int >= 10) %} extreme {% elif (states.sensor.heartworm_avg_64.state|int >= 30) %} high {% elif (states.sensor.heartworm_avg_64.state|int >= 14) %} moderate {% elif (states.sensor.heartworm_avg_64.state|int >= 5) %} slight {% elif (states.sensor.heartworm_avg_57.state|int >= 5) %} marginal {% elif (states.sensor.heartworm_avg_57.state|int < 5) %} none {% endif %}""" icon_template_str = """{% if is_state('sensor.heartworm_risk',"extreme") %} mdi:hazard-lights {% elif is_state('sensor.heartworm_risk',"high") %} mdi:triangle-outline {% elif is_state('sensor.heartworm_risk',"moderate") %} mdi:alert-circle-outline {% elif is_state('sensor.heartworm_risk',"slight") %} mdi:exclamation {% elif is_state('sensor.heartworm_risk',"marginal") %} mdi:heart {% elif is_state('sensor.heartworm_risk',"none") %} mdi:snowflake {% endif %}""" await async_setup_component( hass, sensor.DOMAIN, { "sensor": { "platform": "template", "sensors": { "heartworm_risk": { "value_template": value_template_str, "icon_template": icon_template_str, }, }, } }, ) await hass.async_block_till_done() await hass.async_start() await hass.async_block_till_done() assert len(hass.states.async_all()) == 5 hass.states.async_set("sensor.heartworm_high_80", 10) await hass.async_block_till_done() await hass.async_block_till_done() assert "Template loop detected" not in caplog.text state = hass.states.get("sensor.heartworm_risk") assert state.state == "extreme" assert state.attributes[ATTR_ICON] == "mdi:hazard-lights" await hass.async_block_till_done() assert state.state == 
"extreme" assert state.attributes[ATTR_ICON] == "mdi:hazard-lights" assert "Template loop detected" not in caplog.text @pytest.mark.parametrize("count,domain", [(1, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "friendly_name_template": "{{ states.sensor.test_state.state }}", } }, } }, ], ) async def test_duplicate_templates(hass, start_ha): """Test template entity where the value and friendly name as the same template.""" hass.states.async_set("sensor.test_state", "Abc") await hass.async_block_till_done() state = hass.states.get(TEST_NAME) assert state.attributes["friendly_name"] == "Abc" assert state.state == "Abc" hass.states.async_set("sensor.test_state", "Def") await hass.async_block_till_done() state = hass.states.get(TEST_NAME) assert state.attributes["friendly_name"] == "Def" assert state.state == "Def" @pytest.mark.parametrize("count,domain", [(2, "template")]) @pytest.mark.parametrize( "config", [ { "template": [ {"invalid": "config"}, # Config after invalid should still be set up { "unique_id": "listening-test-event", "trigger": {"platform": "event", "event_type": "test_event"}, "sensors": { "hello": { "friendly_name": "Hello Name", "unique_id": "hello_name-id", "device_class": "battery", "unit_of_measurement": "%", "value_template": "{{ trigger.event.data.beer }}", "entity_picture_template": "{{ '/local/dogs.png' }}", "icon_template": "{{ 'mdi:pirate' }}", "attribute_templates": { "plus_one": "{{ trigger.event.data.beer + 1 }}" }, }, }, "sensor": [ { "name": "via list", "unique_id": "via_list-id", "device_class": "battery", "unit_of_measurement": "%", "availability": "{{ True }}", "state": "{{ trigger.event.data.beer + 1 }}", "picture": "{{ '/local/dogs.png' }}", "icon": "{{ 'mdi:pirate' }}", "attributes": { "plus_one": "{{ trigger.event.data.beer + 1 }}" }, "state_class": "measurement", } ], }, { "trigger": [], 
"sensors": { "bare_minimum": { "value_template": "{{ trigger.event.data.beer }}" }, }, }, ], }, ], ) async def test_trigger_entity(hass, start_ha): """Test trigger entity works.""" state = hass.states.get("sensor.hello_name") assert state is not None assert state.state == STATE_UNKNOWN state = hass.states.get("sensor.bare_minimum") assert state is not None assert state.state == STATE_UNKNOWN context = Context() hass.bus.async_fire("test_event", {"beer": 2}, context=context) await hass.async_block_till_done() state = hass.states.get("sensor.hello_name") assert state.state == "2" assert state.attributes.get("device_class") == "battery" assert state.attributes.get("icon") == "mdi:pirate" assert state.attributes.get("entity_picture") == "/local/dogs.png" assert state.attributes.get("plus_one") == 3 assert state.attributes.get("unit_of_measurement") == "%" assert state.context is context ent_reg = entity_registry.async_get(hass) assert len(ent_reg.entities) == 2 assert ( ent_reg.entities["sensor.hello_name"].unique_id == "listening-test-event-hello_name-id" ) assert ( ent_reg.entities["sensor.via_list"].unique_id == "listening-test-event-via_list-id" ) state = hass.states.get("sensor.via_list") assert state.state == "3" assert state.attributes.get("device_class") == "battery" assert state.attributes.get("icon") == "mdi:pirate" assert state.attributes.get("entity_picture") == "/local/dogs.png" assert state.attributes.get("plus_one") == 3 assert state.attributes.get("unit_of_measurement") == "%" assert state.attributes.get("state_class") == "measurement" assert state.context is context @pytest.mark.parametrize("count,domain", [(1, "template")]) @pytest.mark.parametrize( "config", [ { "template": { "trigger": {"platform": "event", "event_type": "test_event"}, "sensors": { "hello": { "unique_id": "no-base-id", "friendly_name": "Hello", "value_template": "{{ non_existing + 1 }}", } }, }, }, ], ) async def test_trigger_entity_render_error(hass, start_ha): """Test trigger 
entity handles render error.""" state = hass.states.get("sensor.hello") assert state is not None assert state.state == STATE_UNKNOWN context = Context() hass.bus.async_fire("test_event", {"beer": 2}, context=context) await hass.async_block_till_done() state = hass.states.get("sensor.hello") assert state.state == STATE_UNAVAILABLE ent_reg = entity_registry.async_get(hass) assert len(ent_reg.entities) == 1 assert ent_reg.entities["sensor.hello"].unique_id == "no-base-id" @pytest.mark.parametrize("count,domain", [(0, sensor.DOMAIN)]) @pytest.mark.parametrize( "config", [ { "sensor": { "platform": "template", "trigger": {"platform": "event", "event_type": "test_event"}, "sensors": { "test_template_sensor": { "value_template": "{{ states.sensor.test_state.state }}", "friendly_name_template": "{{ states.sensor.test_state.state }}", } }, } }, ], ) async def test_trigger_not_allowed_platform_config(hass, start_ha, caplog_setup_text): """Test we throw a helpful warning if a trigger is configured in platform config.""" state = hass.states.get(TEST_NAME) assert state is None assert ( "You can only add triggers to template entities if they are defined under `template:`." 
in caplog_setup_text ) @pytest.mark.parametrize("count,domain", [(1, "template")]) @pytest.mark.parametrize( "config", [ { "template": { "sensor": { "name": "top-level", "device_class": "battery", "state_class": "measurement", "state": "5", "unit_of_measurement": "%", }, }, }, ], ) async def test_config_top_level(hass, start_ha): """Test unique_id option only creates one sensor per id.""" assert len(hass.states.async_all()) == 1 state = hass.states.get("sensor.top_level") assert state is not None assert state.state == "5" assert state.attributes["device_class"] == "battery" assert state.attributes["state_class"] == "measurement" async def test_trigger_entity_available(hass): """Test trigger entity availability works.""" assert await async_setup_component( hass, "template", { "template": [ { "trigger": {"platform": "event", "event_type": "test_event"}, "sensor": [ { "name": "Maybe Available", "availability": "{{ trigger and trigger.event.data.beer == 2 }}", "state": "{{ trigger.event.data.beer }}", }, ], }, ], }, ) await hass.async_block_till_done() # Sensors are unknown if never triggered state = hass.states.get("sensor.maybe_available") assert state is not None assert state.state == STATE_UNKNOWN hass.bus.async_fire("test_event", {"beer": 2}) await hass.async_block_till_done() state = hass.states.get("sensor.maybe_available") assert state.state == "2" hass.bus.async_fire("test_event", {"beer": 1}) await hass.async_block_till_done() state = hass.states.get("sensor.maybe_available") assert state.state == "unavailable" async def test_trigger_entity_device_class_parsing_works(hass): """Test trigger entity device class parsing works.""" assert await async_setup_component( hass, "template", { "template": [ { "trigger": {"platform": "event", "event_type": "test_event"}, "sensor": [ { "name": "Date entity", "state": "{{ now().date() }}", "device_class": "date", }, { "name": "Timestamp entity", "state": "{{ now() }}", "device_class": "timestamp", }, ], }, ], }, ) await 
hass.async_block_till_done() now = dt_util.now() with patch("homeassistant.util.dt.now", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() date_state = hass.states.get("sensor.date_entity") assert date_state is not None assert date_state.state == now.date().isoformat() ts_state = hass.states.get("sensor.timestamp_entity") assert ts_state is not None assert ts_state.state == now.isoformat(timespec="seconds") async def test_trigger_entity_device_class_errors_works(hass): """Test trigger entity device class errors works.""" assert await async_setup_component( hass, "template", { "template": [ { "trigger": {"platform": "event", "event_type": "test_event"}, "sensor": [ { "name": "Date entity", "state": "invalid", "device_class": "date", }, { "name": "Timestamp entity", "state": "invalid", "device_class": "timestamp", }, ], }, ], }, ) await hass.async_block_till_done() now = dt_util.now() with patch("homeassistant.util.dt.now", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() date_state = hass.states.get("sensor.date_entity") assert date_state is not None assert date_state.state == STATE_UNKNOWN ts_state = hass.states.get("sensor.timestamp_entity") assert ts_state is not None assert ts_state.state == STATE_UNKNOWN async def test_entity_device_class_parsing_works(hass): """Test entity device class parsing works.""" now = dt_util.now() with patch("homeassistant.util.dt.now", return_value=now): assert await async_setup_component( hass, "template", { "template": [ { "sensor": [ { "name": "Date entity", "state": "{{ now().date() }}", "device_class": "date", }, { "name": "Timestamp entity", "state": "{{ now() }}", "device_class": "timestamp", }, ], }, ], }, ) await hass.async_block_till_done() date_state = hass.states.get("sensor.date_entity") assert date_state is not None assert date_state.state == now.date().isoformat() ts_state = hass.states.get("sensor.timestamp_entity") assert ts_state is not 
None assert ts_state.state == now.isoformat(timespec="seconds") async def test_entity_device_class_errors_works(hass): """Test entity device class errors works.""" assert await async_setup_component( hass, "template", { "template": [ { "sensor": [ { "name": "Date entity", "state": "invalid", "device_class": "date", }, { "name": "Timestamp entity", "state": "invalid", "device_class": "timestamp", }, ], }, ], }, ) await hass.async_block_till_done() now = dt_util.now() with patch("homeassistant.util.dt.now", return_value=now): hass.bus.async_fire("test_event") await hass.async_block_till_done() date_state = hass.states.get("sensor.date_entity") assert date_state is not None assert date_state.state == STATE_UNKNOWN ts_state = hass.states.get("sensor.timestamp_entity") assert ts_state is not None assert ts_state.state == STATE_UNKNOWN
{ "content_hash": "0d71ea0a2909ee2cb52faa239da8c6f4", "timestamp": "", "source": "github", "line_count": 1262, "max_line_length": 142, "avg_line_length": 34.038827258320126, "alnum_prop": 0.5050399236445748, "repo_name": "jawilson/home-assistant", "id": "0352080bed830c641374e899f9be5d0ef4b6192c", "size": "42957", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tests/components/template/test_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2782" }, { "name": "Python", "bytes": "40129467" }, { "name": "Shell", "bytes": "4910" } ], "symlink_target": "" }
"""Routines for loading PUDL data into various storage formats.""" import logging import sys from sqlite3 import Connection as SQLite3Connection from sqlite3 import sqlite_version from typing import Dict import pandas as pd import sqlalchemy as sa from packaging import version from sqlalchemy.exc import IntegrityError from pudl.helpers import find_foreign_key_errors from pudl.metadata.classes import Package logger = logging.getLogger(__name__) MINIMUM_SQLITE_VERSION = "3.32.0" def dfs_to_sqlite( dfs: Dict[str, pd.DataFrame], engine: sa.engine.Engine, check_foreign_keys: bool = True, check_types: bool = True, check_values: bool = True, ) -> None: """Load a dictionary of dataframes into the PUDL SQLite DB. Args: dfs: Dictionary mapping table names to dataframes. engine: PUDL DB connection engine. check_foreign_keys: if True, enforce foreign key constraints. check_types: if True, enforce column data types. check_values: if True, enforce value constraints. """ # This magic makes SQLAlchemy tell SQLite to check foreign key constraints # whenever we insert data into thd database, which it doesn't do by default @sa.event.listens_for(sa.engine.Engine, "connect") def _set_sqlite_pragma(dbapi_connection, connection_record): if isinstance(dbapi_connection, SQLite3Connection): cursor = dbapi_connection.cursor() cursor.execute( f"PRAGMA foreign_keys={'ON' if check_foreign_keys else 'OFF'};" ) cursor.close() bad_sqlite_version = version.parse(sqlite_version) < version.parse( MINIMUM_SQLITE_VERSION ) if bad_sqlite_version and check_types: check_types = False logger.warning( f"Found SQLite {sqlite_version} which is less than " f"the minimum required version {MINIMUM_SQLITE_VERSION} " "As a result, data type constraint checking has been disabled." 
) # Generate a SQLAlchemy MetaData object from dataframe names: md = Package.from_resource_ids(resource_ids=tuple(sorted(dfs))).to_sql( check_types=check_types, check_values=check_values ) # Delete any existing tables, and create them anew: md.drop_all(engine) md.create_all(engine) # Load any tables that exist in our dictionary of dataframes into the # corresponding table in the newly create database: for table in md.sorted_tables: logger.info(f"Loading {table.name} into PUDL SQLite DB.") try: dfs[table.name].to_sql( table.name, engine, if_exists="append", index=False, dtype={c.name: c.type for c in table.columns}, ) except IntegrityError as err: logger.info(find_foreign_key_errors(dfs)) logger.info(err) sys.exit(1)
{ "content_hash": "9b702838070fce13a66d3d564700f7b1", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 79, "avg_line_length": 35.083333333333336, "alnum_prop": 0.6562606040040719, "repo_name": "catalyst-cooperative/pudl", "id": "d12a54f5ed45f307006a676d11714dc97fab10d4", "size": "2947", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/pudl/load.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jinja", "bytes": "7488" }, { "name": "Python", "bytes": "1658629" }, { "name": "Shell", "bytes": "5118" } ], "symlink_target": "" }
import re def compile(name): print "Compiling "+name+"..." file = open('src/' + name + '/' + name + '.tpl.html', 'r') tpl=file.read() tpl=tpl.replace("'","\\'"); tpl=tpl.replace("\n","").replace("\r","").replace("\t"," ") tpl=re.sub("\s\s+" , " ", tpl) file = open('src/' + name + '/' + name + '.js', 'r') js=file.read() js=js.replace("%%TEMPLATE%%",tpl) text_file = open("dist/" + name + ".min.js", "w") text_file.write(js) text_file.close() try: file = open('src/' + name + '/' + name + '.css', 'r') js=file.read() text_file = open("dist/" + name + ".css", "w") text_file.write(js) text_file.close() except: pass compile("ilTable") compile("ilDetail") compile("ilModal") compile("ilAdvancedCombo") compile("ilInput") compile("ilTable2") compile("ilWeek") compile("ilYearCalendar") compile("ilUpload") compile("ilSurvey") compile("ilList") compile("ilPanel") compile("ilLoadingButton") compile("ilSearch") compile("ilCVSTable") compile("ilCalendar") compile("ilMasterDetail")
{ "content_hash": "765ae0262d29ef4e71b260f90028ff1c", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 60, "avg_line_length": 22.488888888888887, "alnum_prop": 0.6096837944664032, "repo_name": "ilausuch/il.angularUI", "id": "2bf777c3475351efc1a2aa4920ee4dcb2c1c37ec", "size": "1012", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "compile.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "9925" }, { "name": "HTML", "bytes": "182601" }, { "name": "JavaScript", "bytes": "113728" }, { "name": "Python", "bytes": "1012" } ], "symlink_target": "" }
import os import config import logging import json import models.node as nm import models.proxy as pr INSTANCE_FILE = os.path.join(config.PERMDIR, 'details.json') INSTANCE_INTERMEDIA_FILE = os.path.join(config.PERMDIR, 'details.tmp.json') def write_details(nodes, proxies): with open(INSTANCE_INTERMEDIA_FILE, 'w') as f: f.write(json.dumps({'nodes': nodes, 'proxies': proxies})) os.rename(INSTANCE_INTERMEDIA_FILE, INSTANCE_FILE) def read_details(): try: with open(INSTANCE_FILE, 'r') as f: return json.loads(f.read()) except IOError, e: logging.exception(e) return {'nodes': {}, 'proxies': {}} POLL_FILE = os.path.join(config.PERMDIR, 'poll.json') POLL_INTERMEDIA_FILE = os.path.join(config.PERMDIR, 'poll.tmp.json') def write_poll(nodes, proxies): with open(POLL_INTERMEDIA_FILE, 'w') as f: f.write(json.dumps({ 'nodes': nodes, 'proxies': proxies, })) os.rename(POLL_INTERMEDIA_FILE, POLL_FILE) def read_poll(): try: with open(POLL_FILE, 'r') as f: return json.loads(f.read()) except IOError, e: logging.exception(e) return {'nodes': [], 'proxies': []} def write_nodes(nodes, proxies): poll_nodes = [] for n in nodes: i = { 'host': n.host, 'port': n.port, 'suppress_alert': n.suppress_alert, } poll_nodes.append(i) write_poll( poll_nodes, [{ 'host': p.host, 'port': p.port, 'suppress_alert': p.suppress_alert, } for p in proxies]) def write_nodes_proxies_from_db(): write_nodes(nm.list_all_nodes(), pr.list_all())
{ "content_hash": "7e6ccf12619916eafb2fc8fb17667247", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 75, "avg_line_length": 25.279411764705884, "alnum_prop": 0.5840605002908668, "repo_name": "HunanTV/redis-ctl", "id": "ad1293d7988c2118b0ca57ed6f07a28f6c2f03ab", "size": "1719", "binary": false, "copies": "1", "ref": "refs/heads/v0.9", "path": "app/file_ipc.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "19525" }, { "name": "HTML", "bytes": "153585" }, { "name": "JavaScript", "bytes": "29218" }, { "name": "Makefile", "bytes": "1978" }, { "name": "Python", "bytes": "156198" } ], "symlink_target": "" }
""" Slice nodes. Slices are important when working with lists. Tracking them can allow to achieve more compact code, or predict results at compile time. There will be a method "computeExpressionSlice" to aid predicting them. """ from nuitka.PythonVersions import python_version from nuitka.specs import BuiltinParameterSpecs from .ConstantRefNodes import ExpressionConstantNoneRef, makeConstantRefNode from .ExpressionBases import ( ExpressionChildHavingBase, ExpressionChildrenHavingBase, ExpressionSpecBasedComputationNoRaiseMixin, ) from .NodeBases import ( SideEffectsFromChildrenMixin, StatementChildrenHavingBase, ) from .NodeMakingHelpers import ( convertNoneConstantToNone, makeStatementExpressionOnlyReplacementNode, makeStatementOnlyNodesFromExpressions, wrapExpressionWithSideEffects, ) from .shapes.BuiltinTypeShapes import tshape_slice class StatementAssignmentSlice(StatementChildrenHavingBase): kind = "STATEMENT_ASSIGNMENT_SLICE" named_children = ("source", "expression", "lower", "upper") def __init__(self, expression, lower, upper, source, source_ref): assert python_version < 0x300 StatementChildrenHavingBase.__init__( self, values={ "source": source, "expression": expression, "lower": lower, "upper": upper, }, source_ref=source_ref, ) def computeStatement(self, trace_collection): source = trace_collection.onExpression(self.subnode_source) # No assignment will occur, if the assignment source raises, so strip it # away. 
if source.willRaiseException(BaseException): result = makeStatementExpressionOnlyReplacementNode( expression=source, node=self ) return ( result, "new_raise", """\ Slice assignment raises exception in assigned value, removed assignment.""", ) lookup_source = trace_collection.onExpression(self.subnode_expression) if lookup_source.willRaiseException(BaseException): result = makeStatementOnlyNodesFromExpressions( expressions=(source, lookup_source) ) return ( result, "new_raise", """\ Slice assignment raises exception in sliced value, removed assignment.""", ) lower = trace_collection.onExpression(self.subnode_lower, allow_none=True) if lower is not None and lower.willRaiseException(BaseException): result = makeStatementOnlyNodesFromExpressions( expressions=(source, lookup_source, lower) ) return ( result, "new_raise", """\ Slice assignment raises exception in lower slice boundary value, removed \ assignment.""", ) upper = trace_collection.onExpression(self.subnode_upper, allow_none=True) if upper is not None and upper.willRaiseException(BaseException): result = makeStatementOnlyNodesFromExpressions( expressions=(source, lookup_source, lower, upper) ) return ( result, "new_raise", """\ Slice assignment raises exception in upper slice boundary value, removed \ assignment.""", ) return lookup_source.computeExpressionSetSlice( set_node=self, lower=lower, upper=upper, value_node=source, trace_collection=trace_collection, ) class StatementDelSlice(StatementChildrenHavingBase): kind = "STATEMENT_DEL_SLICE" named_children = ("expression", "lower", "upper") def __init__(self, expression, lower, upper, source_ref): StatementChildrenHavingBase.__init__( self, values={"expression": expression, "lower": lower, "upper": upper}, source_ref=source_ref, ) def computeStatement(self, trace_collection): lookup_source = trace_collection.onExpression(self.subnode_expression) if lookup_source.willRaiseException(BaseException): result = makeStatementExpressionOnlyReplacementNode( 
expression=lookup_source, node=self ) return ( result, "new_raise", """\ Slice del raises exception in sliced value, removed del""", ) lower = trace_collection.onExpression(self.subnode_lower, allow_none=True) if lower is not None and lower.willRaiseException(BaseException): result = makeStatementOnlyNodesFromExpressions( expressions=(lookup_source, lower) ) return ( result, "new_raise", """ Slice del raises exception in lower slice boundary value, removed del""", ) trace_collection.onExpression(self.subnode_upper, allow_none=True) upper = self.subnode_upper if upper is not None and upper.willRaiseException(BaseException): result = makeStatementOnlyNodesFromExpressions( expressions=(lookup_source, lower, upper) ) return ( result, "new_raise", """ Slice del raises exception in upper slice boundary value, removed del""", ) return lookup_source.computeExpressionDelSlice( set_node=self, lower=lower, upper=upper, trace_collection=trace_collection ) class ExpressionSliceLookup(ExpressionChildrenHavingBase): kind = "EXPRESSION_SLICE_LOOKUP" named_children = ("expression", "lower", "upper") checkers = {"upper": convertNoneConstantToNone, "lower": convertNoneConstantToNone} def __init__(self, expression, lower, upper, source_ref): assert python_version < 0x300 ExpressionChildrenHavingBase.__init__( self, values={"expression": expression, "upper": upper, "lower": lower}, source_ref=source_ref, ) def computeExpression(self, trace_collection): lookup_source = self.subnode_expression return lookup_source.computeExpressionSlice( lookup_node=self, lower=self.subnode_lower, upper=self.subnode_upper, trace_collection=trace_collection, ) @staticmethod def isKnownToBeIterable(count): # TODO: Should ask SliceRegistry return None def makeExpressionBuiltinSlice(start, stop, step, source_ref): if ( (start is None or start.isCompileTimeConstant()) and (stop is None or stop.isCompileTimeConstant()) and (step is None or step.isCompileTimeConstant()) ): # Avoid going slices for what is 
effectively constant. start_value = None if start is None else start.getCompileTimeConstant() stop_value = None if stop is None else stop.getCompileTimeConstant() step_value = None if step is None else step.getCompileTimeConstant() return makeConstantRefNode( constant=slice(start_value, stop_value, step_value), source_ref=source_ref ) if start is None and step is None: return ExpressionBuiltinSlice1(stop=stop, source_ref=source_ref) if start is None: start = ExpressionConstantNoneRef(source_ref=source_ref) if stop is None: stop = ExpressionConstantNoneRef(source_ref=source_ref) if step is None: return ExpressionBuiltinSlice2(start=start, stop=stop, source_ref=source_ref) return ExpressionBuiltinSlice3( start=start, stop=stop, step=step, source_ref=source_ref ) class ExpressionBuiltinSliceMixin(SideEffectsFromChildrenMixin): # Mixins are required to slots __slots__ = () builtin_spec = BuiltinParameterSpecs.builtin_slice_spec @staticmethod def getTypeShape(): return tshape_slice @staticmethod def isKnownToBeIterable(count): # Virtual method provided by mixin, pylint: disable=unused-argument # Definitely not iterable at all return False def mayHaveSideEffects(self): return self.mayRaiseException(BaseException) class ExpressionBuiltinSlice3( ExpressionBuiltinSliceMixin, ExpressionSpecBasedComputationNoRaiseMixin, ExpressionChildrenHavingBase, ): kind = "EXPRESSION_BUILTIN_SLICE3" named_children = ("start", "stop", "step") def __init__(self, start, stop, step, source_ref): ExpressionChildrenHavingBase.__init__( self, values={"start": start, "stop": stop, "step": step}, source_ref=source_ref, ) def computeExpression(self, trace_collection): if ( self.subnode_step.isExpressionConstantNoneRef() or self.subnode_step.getIndexValue() == 1 ): return trace_collection.computedExpressionResult( wrapExpressionWithSideEffects( old_node=self, new_node=ExpressionBuiltinSlice2( start=self.subnode_start, stop=self.subnode_stop, source_ref=self.source_ref, ), 
side_effects=self.subnode_step.extractSideEffects(), ), "new_expression", "Reduce 3 argument slice object creation to two argument form.", ) return self.computeBuiltinSpec( trace_collection=trace_collection, given_values=(self.subnode_start, self.subnode_stop, self.subnode_step), ) def mayRaiseException(self, exception_type): return ( self.subnode_start.mayRaiseException(exception_type) or self.subnode_stop.mayRaiseException(exception_type) or self.subnode_step.mayRaiseException(exception_type) ) class ExpressionBuiltinSlice2( ExpressionBuiltinSliceMixin, ExpressionSpecBasedComputationNoRaiseMixin, ExpressionChildrenHavingBase, ): kind = "EXPRESSION_BUILTIN_SLICE2" named_children = ("start", "stop") def __init__(self, start, stop, source_ref): ExpressionChildrenHavingBase.__init__( self, values={"start": start, "stop": stop}, source_ref=source_ref, ) def computeExpression(self, trace_collection): if self.subnode_start.isExpressionConstantNoneRef(): return trace_collection.computedExpressionResult( wrapExpressionWithSideEffects( old_node=self, new_node=ExpressionBuiltinSlice1( stop=self.subnode_stop, source_ref=self.source_ref ), side_effects=self.subnode_start.extractSideEffects(), ), "new_expression", "Reduce 2 argument slice object creation to single argument form.", ) return self.computeBuiltinSpec( trace_collection=trace_collection, given_values=(self.subnode_start, self.subnode_stop), ) def mayRaiseException(self, exception_type): return self.subnode_start.mayRaiseException( exception_type ) or self.subnode_stop.mayRaiseException(exception_type) class ExpressionBuiltinSlice1( ExpressionBuiltinSliceMixin, ExpressionSpecBasedComputationNoRaiseMixin, ExpressionChildHavingBase, ): kind = "EXPRESSION_BUILTIN_SLICE1" named_child = "stop" def __init__(self, stop, source_ref): ExpressionChildHavingBase.__init__( self, value=stop, source_ref=source_ref, ) def computeExpression(self, trace_collection): return self.computeBuiltinSpec( trace_collection=trace_collection, 
given_values=(self.subnode_stop,), ) def mayRaiseException(self, exception_type): return self.subnode_stop.mayRaiseException(exception_type)
{ "content_hash": "508a48ba177f95fe189fd0545bad0b37", "timestamp": "", "source": "github", "line_count": 378, "max_line_length": 87, "avg_line_length": 32.39153439153439, "alnum_prop": 0.6246324730480235, "repo_name": "kayhayen/Nuitka", "id": "3e43734854b6d7b5d3ce6ccff60372b6bc544d65", "size": "13024", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "nuitka/nodes/SliceNodes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1868" }, { "name": "C", "bytes": "617681" }, { "name": "C++", "bytes": "149777" }, { "name": "Python", "bytes": "6603718" }, { "name": "Shell", "bytes": "1088" } ], "symlink_target": "" }
from flask import Flask import json, requests # configuration DEBUG = True SERVER = 'http://192.168.1.104:5000' app = Flask(__name__) app.config.from_object(__name__) app.config.from_envvar('CURLYSERV_CONF', silent=True) # Get list of files def uploads(username, password): url = app.config['SERVER'] + '/listoffiles' data = {'username' : username, 'password' : password} requesting = requests.post(url, json=data) print requesting.content return requesting # Upload def upload(username, password, file): file = {'file' : open(file, mode='r')} data = {'username' : username, 'password' : password} url = app.config['SERVER'] + '/upload' requesting = requests.post(url, data = data, files = file) print requesting.content return requesting # Download file def download(username, password, filename): args = {'username' : username, 'password' : password, 'filename' : filename} url = app.config['SERVER'] + '/download' requesting = requests.get(url, params=args, stream=True) return requesting # Remove file from server def remove_file(username, password, filename): url = app.config['SERVER'] + '/remove' data = {'username' : username, 'password' : password, 'filename' : filename} requesting = requests.post(url, data=data) return requesting if __name__ == '__main__': app.run(host='0.0.0.0')
{ "content_hash": "d4c035fc3e91926f628e7d1c4b36e5ad", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 77, "avg_line_length": 30.068181818181817, "alnum_prop": 0.6984126984126984, "repo_name": "ayttew/curlyserver", "id": "ba764f60a10014aaa03de58caa7a2c01a77ea7e9", "size": "1323", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "curlyclient.py", "mode": "33261", "license": "mit", "language": [ { "name": "HTML", "bytes": "2659" }, { "name": "Python", "bytes": "11193" } ], "symlink_target": "" }
''' Created on 2015/08/28 @author: haisland0909 ''' from sklearn.pipeline import FeatureUnion from sklearn.grid_search import GridSearchCV from sklearn import cross_validation from sklearn import preprocessing from sklearn.metrics import mean_absolute_error import sklearn.linear_model import sklearn.ensemble import img_to_pickle as i_p import features as f import numpy as np import pandas as pd clf_dict = { 'LR': { "name": 'L2 Logistic Regression', "clf": sklearn.linear_model.LogisticRegression(penalty='l2', dual=False), "paramteters": {'C': [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0]} }, 'GB2': { "name": 'Gradient Boosting New', "clf": sklearn.ensemble.GradientBoostingClassifier(random_state=1), "paramteters": { 'learning_rate': [0.005, 0.01, 0.1], 'n_estimators': [50, 250, 500], 'subsample': [1.0, 0.8], 'max_features': [1.0, 0.8], 'min_samples_split': [2], 'min_samples_leaf': [1, 2], 'max_depth': [2, 5, 8] } } } def get_data(): ''' get X, y data :rtype: tuple ''' _, _, _, train_gray_data, _, _, labels = i_p.load_data() data_df = f.make_data_df(train_gray_data, labels) fu = FeatureUnion(transformer_list=f.feature_transformer_rule) X = fu.fit_transform(data_df) y = np.concatenate(data_df["label"].apply(lambda x: x.flatten())) return (X, y) def get_data_Kfold(mode): ''' get X, y data :rtype: tuple ''' if mode == "train": _, _, _, train_gray_data, _, _, labels = i_p.load_data() data_df = f.make_data_df(train_gray_data, labels) data_df = data_df.reset_index() data_df.columns = ["pngname", "input", "label"] keys = np.asarray(train_gray_data.keys()) kf = cross_validation.KFold(n=len(keys), n_folds=5) return data_df, keys, kf elif mode == "test": _, _, _, _, test_gray_data, _, _ = i_p.load_data() return test_gray_data else: print "mode error!" 
print "set \"train\" or \"test\"" quit() def set_validdata(df, keys): fu = FeatureUnion(transformer_list=f.feature_transformer_rule) Std = preprocessing.StandardScaler() for i in xrange(len(keys)): if i == 0: valid_df = df[(df["pngname"] == keys[i])] else: valid_df = pd.concat([valid_df, df[(df["pngname"] == keys[i])]]) valid_df = valid_df.drop("pngname", axis=1).reset_index() X = fu.fit_transform(valid_df) y = np.concatenate(valid_df["label"].apply(lambda x: x.flatten())) X = Std.fit_transform(X) return (X, y) def set_traindata(df, key): fu = FeatureUnion(transformer_list=f.feature_transformer_rule) Std = preprocessing.StandardScaler() X = fu.fit_transform(df) y = np.concatenate(df["label"].apply(lambda x: x.flatten())) X = Std.fit_transform(X) return (X, y) def kfold_validation_model(model_name="LR"): data_df, keys, kf = get_data_Kfold("train") """ SGD Regression model with stochastic gradient descent Prnalty : L2 """ scores = [] cnt = 1 for train_index, valid_index in kf: print cnt cnt += 1 clf = sklearn.linear_model.SGDRegressor(penalty='l2') train_keys = keys[train_index] valid_keys = keys[valid_index] for i in xrange(len(train_keys)): train_X, train_y = set_traindata(data_df, train_keys[i]) clf.partial_fit(train_X, train_y) valid_X, valid_y = set_validdata(data_df, valid_keys) # predict_prova = clf.predict(valid_X) predict_y = clf.predict(valid_X) score = mean_absolute_error(valid_y, predict_y) scores.append(score) print scores print "Score_Average:", np.average(np.asarray(scores)) def cross_validation_model(model_name="LR"): X, y = get_data() clf = GridSearchCV(estimator=clf_dict[model_name]["clf"], param_grid=clf_dict[model_name]["paramteters"], n_jobs=3, scoring="accuracy") scores = cross_validation.cross_val_score(clf, X, y, cv=5) print scores def downsampling_data(X, y, ratio=0.5, random_state=1): np.random.seed(random_state) assert X.shape[0] == y.size length = X.shape[0] len_range = range(0, length) use_length = int(length * ratio) use_index = 
np.random.choice(len_range, use_length, replace=False) use_X = X[use_index, :] use_y = y[use_index] return (use_X, use_y) if __name__ == '__main__': # cross_validation_model() kfold_validation_model()
{ "content_hash": "24db206131ccda02a20bd70a86c34707", "timestamp": "", "source": "github", "line_count": 177, "max_line_length": 81, "avg_line_length": 26.412429378531073, "alnum_prop": 0.5863101604278075, "repo_name": "haisland0909/Denoising-Dirty-Documents", "id": "b6cf0312428367e0a7e1e2df5ec913b7729fcf01", "size": "4675", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "script/classify.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "42032" } ], "symlink_target": "" }
from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from builtins import super from future import standard_library standard_library.install_aliases() import sys from multiprocessing import Process from . import logger from .config import Config log = logger.getLogger(__name__) config = Config() try: from PyQt4 import QtGui, QtCore, QtDBus pyqt_activity = True except ImportError: pyqt_activity = False log.warn("PyQt4 module not installed.") log.warn("Osdlyrics Not Available.") if pyqt_activity: class Lyrics(QtGui.QWidget): def __init__(self): super(Lyrics, self).__init__() self.__dbusAdaptor = LyricsAdapter(self) self.initUI() def initUI(self): self.setStyleSheet("background:" + config.get_item( "osdlyrics_background")) if config.get_item("osdlyrics_transparent"): self.setAttribute(QtCore.Qt.WA_TranslucentBackground) self.setAttribute(QtCore.Qt.WA_ShowWithoutActivating) self.setAttribute(QtCore.Qt.WA_X11DoNotAcceptFocus) self.setFocusPolicy(QtCore.Qt.NoFocus) if config.get_item("osdlyrics_on_top"): self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.X11BypassWindowManagerHint) else: self.setWindowFlags(QtCore.Qt.FramelessWindowHint) self.setMinimumSize(600, 50) self.resize(600, 60) scn = QtGui.QApplication.desktop().screenNumber( QtGui.QApplication.desktop().cursor().pos()) br = QtGui.QApplication.desktop().screenGeometry(scn).bottomRight() frameGeo = self.frameGeometry() frameGeo.moveBottomRight(br) self.move(frameGeo.topLeft()) self.text = "OSD Lyrics for Musicbox" self.setWindowTitle("Lyrics") self.show() def mousePressEvent(self, event): self.mpos = event.pos() def mouseMoveEvent(self, event): if (event.buttons() and QtCore.Qt.LeftButton): diff = event.pos() - self.mpos newpos = self.pos() + diff self.move(newpos) def wheelEvent(self, event): self.resize(self.width() + event.delta(), self.height()) def paintEvent(self, 
event): qp = QtGui.QPainter() qp.begin(self) self.drawText(event, qp) qp.end() def drawText(self, event, qp): osdlyrics_color = config.get_item("osdlyrics_color") osdlyrics_font = config.get_item("osdlyrics_font") font = QtGui.QFont(osdlyrics_font[0], osdlyrics_font[1]) pen = QtGui.QColor(osdlyrics_color[0], osdlyrics_color[1], osdlyrics_color[2]) qp.setFont(font) qp.setPen(pen) qp.drawText(event.rect(), QtCore.Qt.AlignCenter | QtCore.Qt.TextWordWrap, self.text) class LyricsAdapter(QtDBus.QDBusAbstractAdaptor): QtCore.Q_CLASSINFO("D-Bus Interface", "local.musicbox.Lyrics") QtCore.Q_CLASSINFO( "D-Bus Introspection", ' <interface name="local.musicbox.Lyrics">\n' ' <method name="refresh_lyrics">\n' ' <arg direction="in" type="s" name="lyric"/>\n' ' </method>\n' ' </interface>\n') def __init__(self, parent): super(LyricsAdapter, self).__init__(parent) @QtCore.pyqtSlot(str) def refresh_lyrics(self, text): self.parent().text = text self.parent().repaint() def show_lyrics(): app = QtGui.QApplication(sys.argv) lyrics = Lyrics() QtDBus.QDBusConnection.sessionBus().registerService('org.musicbox.Bus') QtDBus.QDBusConnection.sessionBus().registerObject('/', lyrics) sys.exit(app.exec_()) def show_lyrics_new_process(): if pyqt_activity and config.get_item("osdlyrics"): p = Process(target=show_lyrics) p.daemon = True p.start()
{ "content_hash": "0ec15c83ff9e87f2be48c3d2dbd732a3", "timestamp": "", "source": "github", "line_count": 121, "max_line_length": 79, "avg_line_length": 35.768595041322314, "alnum_prop": 0.5861829944547134, "repo_name": "caitinggui/musicbox", "id": "5741f75342778d33ea01277a2b8c57457605d9a6", "size": "4467", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "NEMbox/osdlyrics.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "137064" } ], "symlink_target": "" }
from __future__ import absolute_import

import os

import six

from pyarrow.pandas_compat import _pandas_api  # noqa
from pyarrow.lib import FeatherError  # noqa
from pyarrow.lib import Table, concat_tables
import pyarrow.lib as ext


def _check_pandas_version():
    # Feather (de)serialisation goes through pandas; reject versions that
    # predate the APIs this module relies on.
    if _pandas_api.loose_version < '0.17.0':
        raise ImportError("feather requires pandas >= 0.17.0")


class FeatherReader(ext.FeatherReader):
    # Thin wrapper over the native Feather reader adding column selection
    # (by index or by name) and pandas conversion.

    def __init__(self, source):
        _check_pandas_version()
        self.source = source
        self.open(source)

    def read_table(self, columns=None):
        """Read the file as a pyarrow.Table, optionally restricted to
        *columns* given either all as indices or all as names."""
        if columns is None:
            return self._read()
        column_types = [type(column) for column in columns]
        if all(map(lambda t: t == int, column_types)):
            return self._read_indices(columns)
        elif all(map(lambda t: t == str, column_types)):
            return self._read_names(columns)

        # Mixed int/str selectors are ambiguous and rejected.
        column_type_names = [t.__name__ for t in column_types]
        raise TypeError("Columns must be indices or names. "
                        "Got columns {} of types {}"
                        .format(columns, column_type_names))

    def read_pandas(self, columns=None, use_threads=True):
        # Convenience wrapper: read (a subset of) the file straight into a
        # pandas DataFrame.
        return self.read_table(columns=columns).to_pandas(
            use_threads=use_threads)


def check_chunked_overflow(col):
    # A Feather column must be a single contiguous Arrow chunk; pandas
    # conversion splits binary/string columns larger than 2GB into several
    # chunks, which this format cannot represent.
    if col.data.num_chunks == 1:
        return

    if col.type in (ext.binary(), ext.string()):
        raise ValueError("Column '{0}' exceeds 2GB maximum capacity of "
                         "a Feather binary column. This restriction may be "
                         "lifted in the future".format(col.name))
    else:
        # TODO(wesm): Not sure when else this might be reached
        raise ValueError("Column '{0}' of type {1} was chunked on conversion "
                         "to Arrow and cannot be currently written to "
                         "Feather format".format(col.name, str(col.type)))


class FeatherWriter(object):
    # Wraps the native Feather writer; one instance writes exactly one file
    # and is closed at the end of write().

    def __init__(self, dest):
        _check_pandas_version()
        self.dest = dest
        self.writer = ext.FeatherWriter()
        self.writer.open(dest)

    def write(self, df):
        """Serialise *df* column by column and close the underlying file."""
        if isinstance(df, _pandas_api.pd.SparseDataFrame):
            df = df.to_dense()

        if not df.columns.is_unique:
            raise ValueError("cannot serialize duplicate column names")

        # TODO(wesm): Remove this length check, see ARROW-1732
        if len(df.columns) > 0:
            table = Table.from_pandas(df, preserve_index=False)
            for i, name in enumerate(table.schema.names):
                col = table[i]
                # Each column must be single-chunk; see
                # check_chunked_overflow for why.
                check_chunked_overflow(col)
                self.writer.write_array(name, col.data.chunk(0))

        self.writer.close()


class FeatherDataset(object):
    """
    Encapsulates details of reading a list of Feather files.

    Parameters
    ----------
    path_or_paths : List[str]
        A list of file names
    validate_schema : boolean, default True
        Check that individual file schemas are all the same / compatible
    """
    def __init__(self, path_or_paths, validate_schema=True):
        _check_pandas_version()
        self.paths = path_or_paths
        self.validate_schema = validate_schema

    def read_table(self, columns=None):
        """
        Read multiple feather files as a single pyarrow.Table

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file

        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns)
        """
        # The first file fixes the reference schema the others are
        # validated against.
        _fil = FeatherReader(self.paths[0]).read_table(columns=columns)
        self._tables = [_fil]
        self.schema = _fil.schema

        for fil in self.paths[1:]:
            fil_table = FeatherReader(fil).read_table(columns=columns)
            if self.validate_schema:
                self.validate_schemas(fil, fil_table)
            self._tables.append(fil_table)
        return concat_tables(self._tables)

    def validate_schemas(self, piece, table):
        # Raise if *table* does not match the schema of the first file.
        if not self.schema.equals(table.schema):
            raise ValueError('Schema in {0!s} was different. \n'
                             '{1!s}\n\nvs\n\n{2!s}'
                             .format(piece, self.schema, table.schema))

    def read_pandas(self, columns=None, use_threads=True):
        """
        Read multiple Feather files as a single pandas DataFrame

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file
        use_threads : boolean, default True
            Use multiple threads when converting to pandas

        Returns
        -------
        pandas.DataFrame
            Content of the file as a pandas DataFrame (of columns)
        """
        return self.read_table(columns=columns).to_pandas(
            use_threads=use_threads)


def write_feather(df, dest):
    """
    Write a pandas.DataFrame to Feather format

    Parameters
    ----------
    df : pandas.DataFrame
    dest : string
        Local file path
    """
    writer = FeatherWriter(dest)
    try:
        writer.write(df)
    except Exception:
        # Try to make sure the resource is closed
        # (dropping the last reference and forcing a collection runs the
        # native writer's destructor so the file handle is released before
        # the partial output file is removed).
        import gc
        writer = None
        gc.collect()
        if isinstance(dest, six.string_types):
            try:
                os.remove(dest)
            except os.error:
                pass
        raise


def read_feather(source, columns=None, use_threads=True):
    """
    Read a pandas.DataFrame from Feather format

    Parameters
    ----------
    source : string file path, or file-like object
    columns : sequence, optional
        Only read a specific set of columns. If not provided, all columns
        are read
    use_threads: bool, default True
        Whether to parallelize reading using multiple threads

    Returns
    -------
    df : pandas.DataFrame
    """
    reader = FeatherReader(source)
    return reader.read_pandas(columns=columns, use_threads=use_threads)


def read_table(source, columns=None):
    """
    Read a pyarrow.Table from Feather format

    Parameters
    ----------
    source : string file path, or file-like object
    columns : sequence, optional
        Only read a specific set of columns. If not provided, all columns
        are read

    Returns
    -------
    table : pyarrow.Table
    """
    reader = FeatherReader(source)
    return reader.read_table(columns=columns)
{ "content_hash": "f632bc3210671bd895d2cfdb7d113d7c", "timestamp": "", "source": "github", "line_count": 216, "max_line_length": 78, "avg_line_length": 30.11574074074074, "alnum_prop": 0.5857033051498847, "repo_name": "majetideepak/arrow", "id": "91b77cb9644f3288366f7f43b6ae7a833182d9bc", "size": "7291", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/pyarrow/feather.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "73655" }, { "name": "Awk", "bytes": "3683" }, { "name": "Batchfile", "bytes": "32714" }, { "name": "C", "bytes": "334766" }, { "name": "C#", "bytes": "505406" }, { "name": "C++", "bytes": "8830397" }, { "name": "CMake", "bytes": "443673" }, { "name": "CSS", "bytes": "3946" }, { "name": "Dockerfile", "bytes": "51066" }, { "name": "Emacs Lisp", "bytes": "931" }, { "name": "FreeMarker", "bytes": "2271" }, { "name": "Go", "bytes": "835735" }, { "name": "HTML", "bytes": "22930" }, { "name": "Java", "bytes": "2941380" }, { "name": "JavaScript", "bytes": "99135" }, { "name": "Lua", "bytes": "8771" }, { "name": "M4", "bytes": "8712" }, { "name": "MATLAB", "bytes": "36600" }, { "name": "Makefile", "bytes": "49294" }, { "name": "Meson", "bytes": "37613" }, { "name": "Objective-C", "bytes": "11580" }, { "name": "PLpgSQL", "bytes": "56995" }, { "name": "Perl", "bytes": "3799" }, { "name": "Python", "bytes": "1885355" }, { "name": "R", "bytes": "214313" }, { "name": "Ruby", "bytes": "729461" }, { "name": "Rust", "bytes": "2011342" }, { "name": "Shell", "bytes": "358704" }, { "name": "TSQL", "bytes": "29787" }, { "name": "Thrift", "bytes": "138360" }, { "name": "TypeScript", "bytes": "1125277" } ], "symlink_target": "" }
""" This module returns stats about the DynamoDB table """ from datetime import datetime, timedelta from boto.exception import JSONResponseError, BotoServerError from retrying import retry from dynamic_dynamodb.aws import dynamodb from dynamic_dynamodb.log_handler import LOGGER as logger from dynamic_dynamodb.aws.cloudwatch import ( CLOUDWATCH_CONNECTION as cloudwatch_connection) from dynamic_dynamodb.config_handler import get_global_option from boto.ec2 import cloudwatch from sys import stdin import dateutil.parser import json def get_consumed_read_units_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of consumed read units in percent :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Number of consumed reads as a percentage of provisioned reads """ try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedReadCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_read_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_read_units = 0 try: table_read_units = dynamodb.get_provisioned_table_read_units( table_name) consumed_read_units_percent = ( float(consumed_read_units) / float(table_read_units) * 100) except JSONResponseError: raise logger.info('{0} - Consumed read units: {1:.2f}%'.format( table_name, consumed_read_units_percent)) return consumed_read_units_percent def get_throttled_read_event_count( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled read events during a given time frame :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the 
CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: int -- Number of throttled read events during the time period """ try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ReadThrottleEvents') except BotoServerError: raise if metrics: throttled_read_events = int(metrics[0]['Sum']) else: throttled_read_events = 0 logger.info('{0} - Read throttle count: {1:d}'.format( table_name, throttled_read_events)) return throttled_read_events def get_throttled_by_provisioned_read_event_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled read events in percent :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled read events by provisioning """ try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ReadThrottleEvents') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 throttled_read_events = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: throttled_read_events = 0 try: table_read_units = dynamodb.get_provisioned_table_read_units( table_name) throttled_by_provisioned_read_percent = ( float(throttled_read_events) / float(table_read_units) * 100) except JSONResponseError: raise logger.info('{0} - Throttled read percent by provision: {1:.2f}%'.format( table_name, throttled_by_provisioned_read_percent)) return throttled_by_provisioned_read_percent def get_throttled_by_consumed_read_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled read events in percent of consumption :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param 
lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled read events by consumption """ try: metrics1 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedReadCapacityUnits') metrics2 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ReadThrottleEvents') except BotoServerError: raise if metrics1 and metrics2: lookback_seconds = lookback_period * 60 throttled_by_consumed_read_percent = ( ( (float(metrics2[0]['Sum']) / float(lookback_seconds)) / (float(metrics1[0]['Sum']) / float(lookback_seconds)) ) * 100) else: throttled_by_consumed_read_percent = 0 logger.info('{0} - Throttled read percent by consumption: {1:.2f}%'.format( table_name, throttled_by_consumed_read_percent)) return throttled_by_consumed_read_percent def get_consumed_write_units_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of consumed write units in percent :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Number of consumed writes as a percentage of provisioned writes """ try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedWriteCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_write_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_write_units = 0 try: table_write_units = dynamodb.get_provisioned_table_write_units( table_name) consumed_write_units_percent = ( float(consumed_write_units) / float(table_write_units) * 100) except JSONResponseError: raise logger.info('{0} - Consumed write units: {1:.2f}%'.format( table_name, 
consumed_write_units_percent)) return consumed_write_units_percent def get_throttled_write_event_count( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled write events during a given time frame :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: int -- Number of throttled write events during the time period """ try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics: throttled_write_count = int(metrics[0]['Sum']) else: throttled_write_count = 0 logger.info('{0} - Write throttle count: {1:d}'.format( table_name, throttled_write_count)) return throttled_write_count def get_throttled_by_provisioned_write_event_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled write events during a given time frame :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled write events by provisioning """ try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 throttled_write_events = float(metrics[0]['Sum']) / float( lookback_seconds) else: throttled_write_events = 0 try: table_write_units = dynamodb.get_provisioned_table_write_units( table_name) throttled_by_provisioned_write_percent = ( float(throttled_write_events) / float(table_write_units) * 100) except JSONResponseError: raise logger.info('{0} - Throttled write 
percent by provision: {1:.2f}%'.format( table_name, throttled_by_provisioned_write_percent)) return throttled_by_provisioned_write_percent def get_throttled_by_consumed_write_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled write events in percent of consumption :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled write events by consumption """ try: metrics1 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedWriteCapacityUnits') metrics2 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics1 and metrics2: lookback_seconds = lookback_period * 60 throttled_by_consumed_write_percent = ( ( (float(metrics2[0]['Sum']) / float(lookback_seconds)) / (float(metrics1[0]['Sum']) / float(lookback_seconds)) ) * 100) else: throttled_by_consumed_write_percent = 0 logger.info( '{0} - Throttled write percent by consumption: {1:.2f}%'.format( table_name, throttled_by_consumed_write_percent)) return throttled_by_consumed_write_percent @retry( wait='exponential_sleep', wait_exponential_multiplier=1000, wait_exponential_max=10000, stop_max_attempt_number=10) def __get_aws_metric(table_name, lookback_window_start, lookback_period, metric_name): """ Returns a metric list from the AWS CloudWatch service, may return None if no metric exists :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: How many minutes to look at :type lookback_period: int :type lookback_period: Length of the lookback period in minutes :type metric_name: str :param metric_name: Name of the metric to retrieve from CloudWatch :returns: 
list -- A list of time series data for the given metric, may be None if there was no data """ try: now = datetime.utcnow() start_time = now - timedelta(minutes=lookback_window_start) end_time = now - timedelta( minutes=lookback_window_start - lookback_period) return cloudwatch_connection.get_metric_statistics( period=lookback_period * 60, start_time=start_time, end_time=end_time, metric_name=metric_name, namespace='AWS/DynamoDB', statistics=['Sum'], dimensions={'TableName': table_name}, unit='Count') except BotoServerError as error: logger.error( 'Unknown boto error. Status: "{0}". ' 'Reason: "{1}". Message: {2}'.format( error.status, error.reason, error.message)) raise def __get_def_connection_cloudwatch(): """ Ensure connection to CloudWatch """ region = 'us-west-2' try: connection = cloudwatch.connect_to_region(region) except Exception as err: logger.error('Failed connecting to CloudWatch: {0}'.format(err)) logger.error( 'Please report an issue at: ' 'https://github.com/sebdah/dynamic-dynamodb/issues') raise logger.debug('Connected to CloudWatch in {0}'.format(region)) return connection def __get_aws_metric_by_time(table_name, start_time, end_time, metric_name, metric_type, period_sec): """ Returns a metric list from the AWS CloudWatch service based on specific time frame. """ try: return __get_def_connection_cloudwatch().get_metric_statistics( period=period_sec, start_time=start_time, end_time=end_time, metric_name=metric_name, namespace='AWS/DynamoDB', statistics=[metric_type], dimensions={'TableName': table_name}, unit='Count') except BotoServerError as error: logger.error( 'Unknown boto error. Status: "{0}". ' 'Reason: "{1}". 
Message: {2}'.format( error.status, error.reason, error.message)) raise if __name__ == '__main__': for metric_info in stdin: param = {} dot_arr = [] metric_info = metric_info.rstrip(" \t\n\r").split(' ') print '>>>> metrics for table: ', metric_info for i, key in enumerate([ 'table_name', 'start_time', 'end_time', 'metric_name', 'metric_type', 'period_sec' ]): if (i >= len(metric_info)): break print key, ' ', i, ' ', metric_info[i] param[key] = metric_info[i] data_points = __get_aws_metric_by_time( param['table_name'], dateutil.parser.parse(param['start_time']), dateutil.parser.parse(param['end_time']), param['metric_name'], param['metric_type'], param['period_sec']) for dot in sorted(data_points, key=lambda dt: dt['Timestamp']): value_out = float(dot[param['metric_type']]) if ('Average' == param['metric_type']): value_out = float(dot[param['metric_type']]) * 60 dot_arr.append({ 'Time' : str(dot['Timestamp']), 'Value' : int(value_out) }) for i, dot in enumerate(dot_arr): if (i > 0): dot['Delta'] = str(int((float(dot['Value'] - dot_arr[i - 1]['Value']) / float(dot_arr[i - 1]['Value'])) * 100)) + "%" print json.dumps(dot)
{ "content_hash": "d00daa80a446449b7762717b517d69a5", "timestamp": "", "source": "github", "line_count": 445, "max_line_length": 141, "avg_line_length": 34.96629213483146, "alnum_prop": 0.6245501285347044, "repo_name": "Spokeo/dynamic-dynamodb", "id": "375d86338dd3ab29d69430fcb4177b36cef9bc9a", "size": "15584", "binary": false, "copies": "1", "ref": "refs/heads/SB-3557", "path": "dynamic_dynamodb/statistics/table.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "152" }, { "name": "Python", "bytes": "273647" } ], "symlink_target": "" }
""" Sahana Eden Member Search Module Automated Tests @copyright: 2011-2016 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import time from gluon import current from tests.web2unittest import SeleniumUnitTest import functools def _kwsearch(instance, column, items, keyword): for item in [instance.dt_data_item(i, column) for i in xrange(1, items + 1)]: if not (keyword.strip().lower() in item.strip().lower()): return False return True class SearchStaff(SeleniumUnitTest): def setUp(self): super(SeleniumUnitTest, self).setUp() print "\n" self.login(account="admin", nexturl="hrm/staff/search?clear_opts=1") def test_hrm002_01_hrm_search_simple(self): """ @case: hrm002-01 @description: Search Members - Simple Search """ key="Mem" self.search(self.search.simple_form, True, [], {"tablename":"hrm_human_resource", "key":key, "filters":[("type",1)]}, manual_check=functools.partial(_kwsearch, keyword=key, items=1, column=2) ) def test_hrm002_02_hrm_search_advance_by_Organization(self): """ @case: hrm002-02 @description: Search Members - Advanced Search by Organization """ key="Finnish Red Cross (FRC)" self.search(self.search.advanced_form, True, ({ "name": "human_resource_search_org", "label": key, "value": True },), {"tablename":"hrm_human_resource", "filters":[("type",1)]}, manual_check=functools.partial(_kwsearch, keyword=key, items=1, column=4) ) def test_hrm002_03_hrm_search_advance_by_Facility(self): """ @case: hrm002-03 @description: Search Members - Advanced Search by Facility """ self.search(self.search.advanced_form, True, ( { "name": "human_resource_search_site", "label": "AP Zone (Office)", "value": True }, { "name": "human_resource_search_site", "label": "Victoria Branch Office (Office)", "value": True }, ), {"tablename":"hrm_human_resource", "filters":[("type",1)]}, manual_check=functools.partial(_kwsearch, keyword="(Office)", items=1, column=6) ) def test_hrm002_04_hrm_search_advance_by_Training(self): """ @case: hrm002-04 @description: Search Members - Advanced Search by Training """ key="Basics of First Aid" self.search(self.search.advanced_form, True, ({ "name": 
"human_resource_search_training", "label": key, "value": True },), {"tablename":"hrm_human_resource", "filters":[("type",1)]}, # "Basics of First Aid" is getting truncated manual_check=functools.partial(_kwsearch, keyword="Basics of Fir", items=1, column=9) )
{ "content_hash": "8eafb886876f98164852f2b6f22c6613", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 97, "avg_line_length": 36.7, "alnum_prop": 0.5735694822888283, "repo_name": "flavour/ifrc_qa", "id": "7f280150c1e1cd14d3e2573f7cb6528239ae91e5", "size": "4429", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "modules/tests/staff/staff_search.py", "mode": "33261", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "727" }, { "name": "CSS", "bytes": "3347085" }, { "name": "HTML", "bytes": "1367849" }, { "name": "JavaScript", "bytes": "20092291" }, { "name": "NSIS", "bytes": "3934" }, { "name": "PHP", "bytes": "15220" }, { "name": "Python", "bytes": "31198396" }, { "name": "Ruby", "bytes": "8291" }, { "name": "Shell", "bytes": "5059" }, { "name": "XSLT", "bytes": "3260831" } ], "symlink_target": "" }
import os import shutil import glob import time import sys import subprocess import string from optparse import OptionParser, make_option SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PKG_NAME = os.path.basename(SCRIPT_DIR) PARAMETERS = None #XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket" SRC_DIR = "" PKG_SRC_DIR = "" def doCMD(cmd): # Do not need handle timeout in this short script, let tool do it print "-->> \"%s\"" % cmd output = [] cmd_proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) while True: cmd_return_code = cmd_proc.poll() if cmd_return_code != None: break if not cmd.endswith("&"): while True: line = cmd_proc.stdout.readline().strip("\r\n") print line if not line or line.find("daemon started") >= 0: break output.append(line) return (cmd_return_code, output) def updateCMD(cmd=None): if "pkgcmd" in cmd: cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd) return cmd def getUSERID(): if PARAMETERS.mode == "SDB": cmd = "sdb -s %s shell id -u %s" % ( PARAMETERS.device, PARAMETERS.user) else: cmd = "ssh %s \"id -u %s\"" % ( PARAMETERS.device, PARAMETERS.user ) return doCMD(cmd) def getPKGID(pkg_name=None): if PARAMETERS.mode == "SDB": cmd = "sdb -s %s shell %s" % ( PARAMETERS.device, updateCMD('pkgcmd -l')) else: cmd = "ssh %s \"%s\"" % ( PARAMETERS.device, updateCMD('pkgcmd -l')) (return_code, output) = doCMD(cmd) if return_code != 0: return None test_pkg_id = None for line in output: if line.find("[" + pkg_name + "]") != -1: pkgidIndex = line.split().index("pkgid") test_pkg_id = line.split()[pkgidIndex+1].strip("[]") break return test_pkg_id def doRemoteCMD(cmd=None): if PARAMETERS.mode == "SDB": cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd)) else: cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd)) return doCMD(cmd) def doRemoteCopy(src=None, dest=None): if PARAMETERS.mode == "SDB": cmd_prefix = "sdb -s %s push" % PARAMETERS.device 
cmd = "%s %s %s" % (cmd_prefix, src, dest) else: cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest) (return_code, output) = doCMD(cmd) doRemoteCMD("sync") if return_code != 0: return True else: return False def uninstPKGs(): action_status = True for root, dirs, files in os.walk(SCRIPT_DIR): for file in files: if file.endswith(".wgt"): pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0])) if not pkg_id: action_status = False continue (return_code, output) = doRemoteCMD( "pkgcmd -u -t wgt -q -n %s" % pkg_id) for line in output: if "Failure" in line: action_status = False break (return_code, output) = doRemoteCMD( "rm -rf %s" % PKG_SRC_DIR) if return_code != 0: action_status = False return action_status def instPKGs(): action_status = True (return_code, output) = doRemoteCMD( "mkdir -p %s" % PKG_SRC_DIR) if return_code != 0: action_status = False for root, dirs, files in os.walk(SCRIPT_DIR): for file in files: if file.endswith(".wgt"): if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)): action_status = False (return_code, output) = doRemoteCMD( "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file)) doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file)) for line in output: if "Failure" in line: action_status = False break for item in glob.glob("%s/*" % SCRIPT_DIR): if item.endswith(".wgt"): continue elif item.endswith("inst.py"): continue else: item_name = os.path.basename(item) if not doRemoteCopy(item, PKG_SRC_DIR+"/"+item_name): #if not doRemoteCopy(item, PKG_SRC_DIR): action_status = False return action_status def main(): try: usage = "usage: inst.py -i" opts_parser = OptionParser(usage=usage) opts_parser.add_option( "-m", dest="mode", action="store", help="Specify mode") opts_parser.add_option( "-s", dest="device", action="store", help="Specify device") opts_parser.add_option( "-i", dest="binstpkg", action="store_true", help="Install package") opts_parser.add_option( "-u", dest="buninstpkg", action="store_true", help="Uninstall package") 
opts_parser.add_option( "-a", dest="user", action="store", help="User name") global PARAMETERS (PARAMETERS, args) = opts_parser.parse_args() except Exception, e: print "Got wrong option: %s, exit ..." % e sys.exit(1) if not PARAMETERS.user: PARAMETERS.user = "app" global SRC_DIR, PKG_SRC_DIR SRC_DIR = "/home/%s/content" % PARAMETERS.user PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME) if not PARAMETERS.mode: PARAMETERS.mode = "SDB" if PARAMETERS.mode == "SDB": if not PARAMETERS.device: (return_code, output) = doCMD("sdb devices") for line in output: if str.find(line, "\tdevice") != -1: PARAMETERS.device = line.split("\t")[0] break else: PARAMETERS.mode = "SSH" if not PARAMETERS.device: print "No device provided" sys.exit(1) user_info = getUSERID() re_code = user_info[0] if re_code == 0 : global XW_ENV userid = user_info[1][0] XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid) else: print "[Error] cmd commands error : %s"%str(user_info[1]) sys.exit(1) if PARAMETERS.binstpkg and PARAMETERS.buninstpkg: print "-i and -u are conflict" sys.exit(1) if PARAMETERS.buninstpkg: if not uninstPKGs(): sys.exit(1) else: if not instPKGs(): sys.exit(1) if __name__ == "__main__": main() sys.exit(0)
{ "content_hash": "f415f721f8c6e5ed2b2823d148ec9938", "timestamp": "", "source": "github", "line_count": 224, "max_line_length": 106, "avg_line_length": 30.03125, "alnum_prop": 0.5376839601605471, "repo_name": "pk-sam/crosswalk-test-suite", "id": "c03f8719f878337ab44644644145bbdf4e26bbf8", "size": "6750", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "behavior/inst.wgt.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "28136" }, { "name": "CSS", "bytes": "697706" }, { "name": "CoffeeScript", "bytes": "18978" }, { "name": "Cucumber", "bytes": "63597" }, { "name": "GLSL", "bytes": "3495" }, { "name": "Groff", "bytes": "12" }, { "name": "HTML", "bytes": "39810614" }, { "name": "Java", "bytes": "602994" }, { "name": "JavaScript", "bytes": "17479410" }, { "name": "Makefile", "bytes": "1044" }, { "name": "PHP", "bytes": "44946" }, { "name": "Python", "bytes": "4304927" }, { "name": "Shell", "bytes": "1100341" }, { "name": "XSLT", "bytes": "767778" } ], "symlink_target": "" }
import locale
import os
import re

from .basecase import BaseTestCase
from .cassconnect import create_db, remove_db, testrun_cqlsh
from .run_cqlsh import TimeoutError

from cqlshlib.cql3handling import CqlRuleSet

BEL = '\x07'  # the terminal-bell character
CTRL_C = '\x03'
TAB = '\t'

# completions not printed out in this many seconds may not be acceptable.
# tune if needed for a slow system, etc, but be aware that the test will
# need to wait this long for each completion test, to make sure more info
# isn't coming
COMPLETION_RESPONSE_TIME = 0.5

completion_separation_re = re.compile(r'\s+')


class CqlshCompletionCase(BaseTestCase):
    """Base harness for tab-completion tests: drives an interactive cqlsh
    subprocess and inspects what it offers after one or two TAB presses."""

    @classmethod
    def setUpClass(cls):
        create_db()

    @classmethod
    def tearDownClass(cls):
        remove_db()

    def setUp(self):
        env = os.environ.copy()
        # very wide terminal so completion choices are not wrapped
        env['COLUMNS'] = '100000'
        if (locale.getpreferredencoding() != 'UTF-8'):
            env['LC_CTYPE'] = 'en_US.utf8'
        self.cqlsh_runner = testrun_cqlsh(cqlver=None, env=env)
        self.cqlsh = self.cqlsh_runner.__enter__()

    def tearDown(self):
        self.cqlsh_runner.__exit__(None, None, None)

    def _get_completions(self, inputstring, split_completed_lines=True):
        """
        Get results of tab completion in cqlsh. Returns a bare string if a
        string completes immediately. Otherwise, returns a set of all
        whitespace-separated tokens in the offered completions by default, or a
        list of the lines in the offered completions if split_completed_lines is
        False.
        """
        self.cqlsh.send(inputstring)
        self.cqlsh.send(TAB)
        immediate = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
        # drop "space + backspace" artifacts from the terminal echo
        immediate = immediate.replace(' \b', '')
        self.assertEqual(immediate[:len(inputstring)], inputstring)
        # keep only what completion appended beyond the echoed input
        immediate = immediate[len(inputstring):]
        immediate = immediate.replace(BEL, '')

        if immediate:
            return immediate

        # no immediate completion: a second TAB prints the list of choices
        self.cqlsh.send(TAB)
        choice_output = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
        if choice_output == BEL:
            choice_output = ''

        self.cqlsh.send(CTRL_C)  # cancel any current line
        self.cqlsh.read_to_next_prompt()

        choice_lines = choice_output.splitlines()
        if choice_lines:
            # ensure the last line of the completion is the prompt
            prompt_regex = self.cqlsh.prompt.lstrip() + re.escape(inputstring)
            msg = ('Double-tab completion '
                   'does not print prompt for input "{}"'.format(inputstring))
            self.assertRegex(choice_lines[-1], prompt_regex, msg=msg)

        # drop the trailing prompt line and any blank lines
        choice_lines = [line.strip() for line in choice_lines[:-1]]
        choice_lines = [line for line in choice_lines if line]

        if split_completed_lines:
            completed_lines = list(map(set, (completion_separation_re.split(line.strip())
                                             for line in choice_lines)))

            if not completed_lines:
                return set()

            completed_tokens = set.union(*completed_lines)
            return completed_tokens - {''}
        else:
            return choice_lines

        assert False

    def _trycompletions_inner(self, inputstring, immediate='', choices=(),
                              other_choices_ok=False,
                              split_completed_lines=True):
        """
        Test tab completion in cqlsh. Enters in the text in inputstring, then
        simulates a tab keypress to see what is immediately completed (this
        should only happen when there is only one completion possible). If
        there is an immediate completion, the new text is expected to match
        'immediate'. If there is no immediate completion, another tab keypress
        is simulated in order to get a list of choices, which are expected to
        match the items in 'choices' (order is not important, but case is).
        """
        completed = self._get_completions(inputstring,
                                          split_completed_lines=split_completed_lines)

        if immediate:
            msg = 'cqlsh completed %r (%d), but we expected %r (%d)' % (completed, len(completed), immediate, len(immediate))
            self.assertEqual(completed, immediate, msg=msg)
            return

        if other_choices_ok:
            # only require that the expected choices are a subset of the offer
            self.assertEqual(set(choices), completed.intersection(choices))
        else:
            self.assertEqual(set(choices), set(completed))

    def trycompletions(self, inputstring, immediate='', choices=(),
                       other_choices_ok=False, split_completed_lines=True):
        # Wrapper around _trycompletions_inner that always resets the cqlsh
        # input line afterwards, so one failing case cannot poison the next.
        try:
            self._trycompletions_inner(inputstring, immediate, choices,
                                       other_choices_ok=other_choices_ok,
                                       split_completed_lines=split_completed_lines)
        finally:
            try:
                self.cqlsh.send(CTRL_C)  # cancel any current line
                self.cqlsh.read_to_next_prompt(timeout=1.0)
            except TimeoutError:
                # retry once
                self.cqlsh.send(CTRL_C)
                self.cqlsh.read_to_next_prompt(timeout=10.0)

    def strategies(self):
        # replication strategies known to the CQL grammar rule set
        return CqlRuleSet.replication_strategies


class TestCqlshCompletion(CqlshCompletionCase):
    """Tab-completion scenarios for individual CQL statement forms."""
    cqlver = '3.1.6'

    def test_complete_on_empty_string(self):
        self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE',
                                         'CONSISTENCY', 'COPY', 'CREATE',
                                         'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
                                         'DROP', 'GRANT', 'HELP', 'INSERT',
                                         'LIST', 'LOGIN', 'PAGING', 'REVOKE',
                                         'SELECT', 'SHOW', 'SOURCE', 'TRACING',
                                         'EXPAND', 'SERIAL', 'TRUNCATE',
                                         'UPDATE', 'USE', 'exit', 'quit',
                                         'CLEAR', 'CLS'))

    def test_complete_command_words(self):
        self.trycompletions('alt', '\b\b\bALTER ')
        self.trycompletions('I', 'NSERT INTO ')
        self.trycompletions('exit', ' ')

    def test_complete_in_uuid(self):
        pass

    def test_complete_in_select(self):
        pass

    def test_complete_in_insert(self):
        self.trycompletions('INSERT INTO ',
                            choices=('twenty_rows_table',
                                     'ascii_with_special_chars', 'users',
                                     'has_all_types', 'system.',
                                     'empty_composite_table', 'empty_table',
                                     'undefined_values_table',
                                     'dynamic_columns',
                                     'twenty_rows_composite_table',
                                     'utf8_with_special_chars',
                                     'system_traces.', 'songs'),
                            other_choices_ok=True)
self.trycompletions('INSERT INTO twenty_rows_composite_table', immediate=' ') self.trycompletions('INSERT INTO twenty_rows_composite_table ', choices=['(', 'JSON']) self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b ', choices=(')', ',')) self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, ', immediate='c ') self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c ', choices=(',', ')')) self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b)', immediate=' VALUES ( ') self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c) VAL', immediate='UES ( ') self.trycompletions( 'INSERT INTO twenty_rows_composite_table (a, b, c) VALUES (', ['<value for a (text)>'], split_completed_lines=False) self.trycompletions( "INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('", ['<value for a (text)>'], split_completed_lines=False) self.trycompletions( "INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ( 'eggs", ['<value for a (text)>'], split_completed_lines=False) self.trycompletions( "INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('eggs'", immediate=', ') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs',"), ['<value for b (text)>'], split_completed_lines=False) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam')"), immediate=' ') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') "), choices=[';', 'USING', 'IF']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam');"), choices=['?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY', 'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE', 'DROP', 'EXPAND', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING', 'REVOKE', 'SELECT', 'SHOW', 'SOURCE', 'SERIAL', 'TRACING', 'TRUNCATE', 'UPDATE', 'USE', 'exit', 'quit', 'CLEAR', 
'CLS']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') US"), immediate='ING T') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING"), immediate=' T') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING T"), choices=['TTL', 'TIMESTAMP']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TT"), immediate='L ') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TI"), immediate='MESTAMP ') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP "), choices=['<wholenumber>']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TTL "), choices=['<wholenumber>']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 "), choices=['AND', ';']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 "), choices=['AND', ';']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 A"), immediate='ND TTL ') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 A"), immediate='ND TIMESTAMP ') self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP "), choices=['<wholenumber>']) self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 "), choices=['AND', ';']) 
self.trycompletions( ("INSERT INTO twenty_rows_composite_table (a, b, c) " "VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 AND "), choices=[]) def test_complete_in_update(self): self.trycompletions("UPD", immediate="ATE ") self.trycompletions("UPDATE ", choices=['twenty_rows_table', 'users', 'has_all_types', 'system.', 'ascii_with_special_chars', 'empty_composite_table', 'empty_table', 'undefined_values_table', 'dynamic_columns', 'twenty_rows_composite_table', 'utf8_with_special_chars', 'system_traces.', 'songs'], other_choices_ok=True) self.trycompletions("UPDATE empty_table ", choices=['USING', 'SET']) self.trycompletions("UPDATE empty_table S", immediate='ET lonelycol = ') self.trycompletions("UPDATE empty_table SET lon", immediate='elycol = ') self.trycompletions("UPDATE empty_table SET lonelycol", immediate=' = ') self.trycompletions("UPDATE empty_table U", immediate='SING T') self.trycompletions("UPDATE empty_table USING T", choices=["TTL", "TIMESTAMP"]) self.trycompletions("UPDATE empty_table SET lonelycol = ", choices=['<term (text)>'], split_completed_lines=False) self.trycompletions("UPDATE empty_table SET lonelycol = 'eg", choices=['<term (text)>'], split_completed_lines=False) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs'", choices=[',', 'WHERE']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ", choices=['TOKEN(', 'lonelykey']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonel", immediate='ykey ') self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey ", choices=['=', '<=', '>=', '>', '<', 'CONTAINS', 'IN', '[']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 ", choices=['AND', 'IF', ';']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 AND ", choices=['TOKEN(', 'lonelykey']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey ", choices=[',', 
')']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) ", choices=['=', '<=', '>=', '<', '>']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) ", choices=[';', 'AND', 'IF']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF ", choices=['EXISTS', '<quotedName>', '<identifier>']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF EXISTS ", choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>', '.', 'CONTAINS']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF lonelykey ", choices=['>=', '!=', '<=', 'IN', '=', '<', '>', 'CONTAINS']) self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF lonelykey CONTAINS ", choices=['false', 'true', '<pgStringLiteral>', '-', '<float>', 'TOKEN', '<identifier>', '<uuid>', '{', '[', 'NULL', '<quotedStringLiteral>', '<blobLiteral>', '<wholenumber>', 'KEY']) def test_complete_in_delete(self): self.trycompletions('DELETE F', choices=['FROM', '<identifier>', '<quotedName>']) self.trycompletions('DELETE a ', choices=['FROM', '[', '.', ',']) self.trycompletions('DELETE a [', choices=['<wholenumber>', 'false', '-', '<uuid>', '<pgStringLiteral>', '<float>', 'TOKEN', '<identifier>', '<quotedStringLiteral>', '{', '[', 'NULL', 'true', '<blobLiteral>']) self.trycompletions('DELETE a, ', choices=['<identifier>', '<quotedName>']) self.trycompletions('DELETE a FROM ', choices=['twenty_rows_table', 'ascii_with_special_chars', 'users', 'has_all_types', 'system.', 'empty_composite_table', 'empty_table', 'system_auth.', 'undefined_values_table', 'dynamic_columns', 'twenty_rows_composite_table', 'utf8_with_special_chars', 'system_traces.', 'songs', self.cqlsh.keyspace + '.'], other_choices_ok=True) self.trycompletions('DELETE FROM ', choices=['twenty_rows_table', 
'ascii_with_special_chars', 'users', 'has_all_types', 'system.', 'empty_composite_table', 'empty_table', 'system_auth.', 'undefined_values_table', 'dynamic_columns', 'twenty_rows_composite_table', 'utf8_with_special_chars', 'system_traces.', 'songs', 'system_auth.', 'system_distributed.', 'system_schema.', 'system_traces.', self.cqlsh.keyspace + '.'], other_choices_ok=True) self.trycompletions('DELETE FROM twenty_rows_composite_table ', choices=['USING', 'WHERE']) self.trycompletions('DELETE FROM twenty_rows_composite_table U', immediate='SING TIMESTAMP ') self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP ', choices=['<wholenumber>']) self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0', choices=['<wholenumber>']) self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 ', immediate='WHERE ') self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ', choices=['a', 'b', 'TOKEN(']) self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE a ', choices=['<=', '>=', 'CONTAINS', 'IN', '[', '=', '<', '>']) self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(', immediate='a ') self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a', immediate=' ') self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a ', choices=[')', ',']) self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) ', choices=['>=', '<=', '=', '<', '>']) self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) >= ', choices=['false', 'true', '<pgStringLiteral>', 'token(', '-', '<float>', 'TOKEN', '<identifier>', '<uuid>', '{', '[', 'NULL', '<quotedStringLiteral>', '<blobLiteral>', '<wholenumber>']) self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ' 
'TOKEN(a) >= TOKEN(0) '), choices=['AND', 'IF', ';']) self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ' 'TOKEN(a) >= TOKEN(0) IF '), choices=['EXISTS', '<identifier>', '<quotedName>']) self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ' 'TOKEN(a) >= TOKEN(0) IF b '), choices=['>=', '!=', '<=', 'IN', '=', '<', '>', 'CONTAINS']) self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ' 'TOKEN(a) >= TOKEN(0) IF b CONTAINS '), choices=['false', 'true', '<pgStringLiteral>', '-', '<float>', 'TOKEN', '<identifier>', '<uuid>', '{', '[', 'NULL','<quotedStringLiteral>', '<blobLiteral>','<wholenumber>', 'KEY']) self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ' 'TOKEN(a) >= TOKEN(0) IF b < 0 '), choices=['AND', ';']) self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ' 'TOKEN(a) >= TOKEN(0) IF b < 0 AND '), choices=['<identifier>', '<quotedName>']) self.trycompletions(("DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE " "b = 'eggs'"), choices=['AND', 'IF', ';']) def test_complete_in_begin_batch(self): self.trycompletions('BEGIN ', choices=['BATCH', 'COUNTER', 'UNLOGGED']) self.trycompletions('BEGIN BATCH ', choices=['DELETE', 'INSERT', 'UPDATE', 'USING']) self.trycompletions('BEGIN BATCH INSERT ', immediate='INTO ' ) def test_complete_in_create_keyspace(self): self.trycompletions('create keyspace ', '', choices=('<identifier>', '<quotedName>', 'IF')) self.trycompletions('create keyspace moo ', "WITH replication = {'class': '") self.trycompletions('create keyspace "12SomeName" with ', "replication = {'class': '") self.trycompletions("create keyspace fjdkljf with foo=bar ", "", choices=('AND', ';')) self.trycompletions("create keyspace fjdkljf with foo=bar AND ", "replication = {'class': '") self.trycompletions("create keyspace moo with replication", " = {'class': '") 
self.trycompletions("create keyspace moo with replication=", " {'class': '") self.trycompletions("create keyspace moo with replication={", "'class':'") self.trycompletions("create keyspace moo with replication={'class'", ":'") self.trycompletions("create keyspace moo with replication={'class': ", "'") self.trycompletions("create keyspace moo with replication={'class': '", "", choices=self.strategies()) # ttl is an "unreserved keyword". should work self.trycompletions("create keySPACE ttl with replication =" "{ 'class' : 'SimpleStrategy'", ", 'replication_factor': ") self.trycompletions("create keyspace ttl with replication =" "{'class':'SimpleStrategy',", " 'replication_factor': ") self.trycompletions("create keyspace \"ttl\" with replication =" "{'class': 'SimpleStrategy', ", "'replication_factor': ") self.trycompletions("create keyspace \"ttl\" with replication =" "{'class': 'SimpleStrategy', 'repl", "ication_factor'") self.trycompletions("create keyspace foo with replication =" "{'class': 'SimpleStrategy', 'replication_factor': ", '', choices=('<term>',)) self.trycompletions("create keyspace foo with replication =" "{'class': 'SimpleStrategy', 'replication_factor': 1", '', choices=('<term>',)) self.trycompletions("create keyspace foo with replication =" "{'class': 'SimpleStrategy', 'replication_factor': 1 ", '}') self.trycompletions("create keyspace foo with replication =" "{'class': 'SimpleStrategy', 'replication_factor': 1, ", '', choices=()) self.trycompletions("create keyspace foo with replication =" "{'class': 'SimpleStrategy', 'replication_factor': 1} ", '', choices=('AND', ';')) self.trycompletions("create keyspace foo with replication =" "{'class': 'NetworkTopologyStrategy', ", '', choices=('<dc_name>',)) self.trycompletions("create keyspace \"PB and J\" with replication={" "'class': 'NetworkTopologyStrategy'", ', ') self.trycompletions("create keyspace PBJ with replication={" "'class': 'NetworkTopologyStrategy'} and ", "durable_writes = '") def 
test_complete_in_string_literals(self): # would be great if we could get a space after this sort of completion, # but readline really wants to make things difficult for us self.trycompletions("create keyspace blah with replication = {'class': 'Sim", "pleStrategy'") def test_complete_in_drop(self): self.trycompletions('DR', immediate='OP ') self.trycompletions('DROP ', choices=['AGGREGATE', 'COLUMNFAMILY', 'FUNCTION', 'INDEX', 'KEYSPACE', 'ROLE', 'TABLE', 'TRIGGER', 'TYPE', 'USER', 'MATERIALIZED']) def test_complete_in_drop_keyspace(self): self.trycompletions('DROP K', immediate='EYSPACE ') quoted_keyspace = '"' + self.cqlsh.keyspace + '"' self.trycompletions('DROP KEYSPACE ', choices=['IF', self.cqlsh.keyspace]) self.trycompletions('DROP KEYSPACE ' + quoted_keyspace, choices=[';']) self.trycompletions('DROP KEYSPACE I', immediate='F EXISTS ' + self.cqlsh.keyspace + ' ;') def test_complete_in_create_type(self): self.trycompletions('CREATE TYPE foo ', choices=['(', '.']) def test_complete_in_drop_type(self): self.trycompletions('DROP TYPE ', choices=['IF', 'system_views.', 'tags', 'system_traces.', 'system_distributed.', 'phone_number', 'band_info_type', 'address', 'system.', 'system_schema.', 'system_auth.', 'system_virtual_schema.', self.cqlsh.keyspace + '.' ]) def test_complete_in_create_trigger(self): self.trycompletions('CREATE TRIGGER ', choices=['<identifier>', '<quotedName>', 'IF' ]) self.trycompletions('CREATE TRIGGER foo ', immediate='ON ' ) self.trycompletions('CREATE TRIGGER foo ON ', choices=['system.', 'system_auth.', 'system_distributed.', 'system_schema.', 'system_traces.', 'system_views.', 'system_virtual_schema.' ], other_choices_ok=True) def create_columnfamily_table_template(self, name): """Parameterized test for CREATE COLUMNFAMILY and CREATE TABLE. 
Since they're synonyms, they should have the same completion behavior, so this test avoids duplication between tests for the two statements.""" prefix = 'CREATE ' + name + ' ' quoted_keyspace = '"' + self.cqlsh.keyspace + '"' self.trycompletions(prefix + '', choices=['IF', self.cqlsh.keyspace, '<new_table_name>']) self.trycompletions(prefix + 'IF ', immediate='NOT EXISTS ') self.trycompletions(prefix + 'IF NOT EXISTS ', choices=['<new_table_name>', self.cqlsh.keyspace]) self.trycompletions(prefix + 'IF NOT EXISTS new_table ', immediate='( ') self.trycompletions(prefix + quoted_keyspace, choices=['.', '(']) self.trycompletions(prefix + quoted_keyspace + '( ', choices=['<new_column_name>', '<identifier>', '<quotedName>']) self.trycompletions(prefix + quoted_keyspace + '.', choices=['<new_table_name>']) self.trycompletions(prefix + quoted_keyspace + '.new_table ', immediate='( ') self.trycompletions(prefix + quoted_keyspace + '.new_table ( ', choices=['<new_column_name>', '<identifier>', '<quotedName>']) self.trycompletions(prefix + ' new_table ( ', choices=['<new_column_name>', '<identifier>', '<quotedName>']) self.trycompletions(prefix + ' new_table (col_a ine', immediate='t ') self.trycompletions(prefix + ' new_table (col_a int ', choices=[',', 'PRIMARY']) self.trycompletions(prefix + ' new_table (col_a int P', immediate='RIMARY KEY ') self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY ', choices=[')', ',']) self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY,', choices=['<identifier>', '<quotedName>']) self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY)', immediate=' ') self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) ', choices=[';', 'WITH']) self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) W', immediate='ITH ') self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ', choices=['allow_auto_snapshot', 'bloom_filter_fp_chance', 'compaction', 'compression', 'default_time_to_live', 
'gc_grace_seconds', 'max_index_interval', 'memtable', 'memtable_flush_period_in_ms', 'CLUSTERING', 'COMPACT', 'caching', 'comment', 'min_index_interval', 'speculative_retry', 'additional_write_policy', 'cdc', 'read_repair']) self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ', choices=['allow_auto_snapshot', 'bloom_filter_fp_chance', 'compaction', 'compression', 'default_time_to_live', 'gc_grace_seconds', 'max_index_interval', 'memtable', 'memtable_flush_period_in_ms', 'CLUSTERING', 'COMPACT', 'caching', 'comment', 'min_index_interval', 'speculative_retry', 'additional_write_policy', 'cdc', 'read_repair']) self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance ', immediate='= ') self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance = ', choices=['<float_between_0_and_1>']) self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH compaction ', immediate="= {'class': '") self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': '", choices=['SizeTieredCompactionStrategy', 'LeveledCompactionStrategy', 'DateTieredCompactionStrategy', 'TimeWindowCompactionStrategy']) self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'S", immediate="izeTieredCompactionStrategy'") self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'SizeTieredCompactionStrategy", immediate="'") self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'SizeTieredCompactionStrategy'", choices=['}', ',']) self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'SizeTieredCompactionStrategy', ", immediate="'") self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'SizeTieredCompactionStrategy', '", choices=['bucket_high', 'bucket_low', 'class', 
'enabled', 'max_threshold', 'min_sstable_size', 'min_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'unchecked_tombstone_compaction', 'only_purge_repaired_tombstones', 'provide_overlapping_tombstones']) self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'SizeTieredCompactionStrategy'}", choices=[';', 'AND']) self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'SizeTieredCompactionStrategy'} AND ", choices=['allow_auto_snapshot', 'bloom_filter_fp_chance', 'compaction', 'compression', 'default_time_to_live', 'gc_grace_seconds', 'max_index_interval', 'memtable', 'memtable_flush_period_in_ms', 'CLUSTERING', 'COMPACT', 'caching', 'comment', 'min_index_interval', 'speculative_retry', 'additional_write_policy', 'cdc', 'read_repair']) self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'DateTieredCompactionStrategy', '", choices=['base_time_seconds', 'max_sstable_age_days', 'timestamp_resolution', 'min_threshold', 'class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction', 'max_window_size_seconds', 'only_purge_repaired_tombstones', 'provide_overlapping_tombstones']) self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = " + "{'class': 'TimeWindowCompactionStrategy', '", choices=['compaction_window_unit', 'compaction_window_size', 'timestamp_resolution', 'min_threshold', 'class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction', 'only_purge_repaired_tombstones', 'provide_overlapping_tombstones']) def test_complete_in_create_columnfamily(self): self.trycompletions('CREATE C', choices=['COLUMNFAMILY', 'CUSTOM']) self.trycompletions('CREATE CO', immediate='LUMNFAMILY ') self.create_columnfamily_table_template('COLUMNFAMILY') def 
test_complete_in_create_materializedview(self): self.trycompletions('CREATE MAT', immediate='ERIALIZED VIEW ') self.trycompletions('CREATE MATERIALIZED VIEW AS ', choices=['AS', 'SELECT']) self.trycompletions('CREATE MATERIALIZED VIEW AS SELECT * ', immediate='FROM ') self.trycompletions('CREATE MATERIALIZED VIEW AS SELECT * FROM system.peers ', immediate = 'WHERE ') self.trycompletions('CREATE MATERIALIZED VIEW AS SELECT * FROM system.peers WHERE host_id ', immediate='IS NOT NULL ' ) self.trycompletions('CREATE MATERIALIZED VIEW AS SELECT * FROM system.peers WHERE host_id IS NOT NULL PR', immediate='IMARY KEY ( ') self.trycompletions('CREATE MATERIALIZED VIEW AS SELECT * FROM system.peers WHERE host_id IS NOT NULL PRIMARY KEY (host_id) ', choices=[';','WITH']) self.trycompletions('CREATE MATERIALIZED VIEW AS SELECT * FROM system.peers WHERE host_id IS NOT NULL PRIMARY KEY (a, b) ', choices=[';','WITH']) self.trycompletions('CREATE MATERIALIZED VIEW AS SELECT * FROM system.peers WHERE host_id IS NOT NULL PRIMARY KEY ((a,b), c) ', choices=[';','WITH']) def test_complete_in_create_table(self): self.trycompletions('CREATE T', choices=['TRIGGER', 'TABLE', 'TYPE']) self.trycompletions('CREATE TA', immediate='BLE ') self.create_columnfamily_table_template('TABLE') def test_complete_in_describe(self): # Cassandra-10733 self.trycompletions('DES', immediate='C') # quoted_keyspace = '"' + self.cqlsh.keyspace + '"' self.trycompletions('DESCR', immediate='IBE ') self.trycompletions('DESC TABLE ', choices=['twenty_rows_table', 'ascii_with_special_chars', 'users', 'has_all_types', 'system.', 'empty_composite_table', 'empty_table', 'system_auth.', 'undefined_values_table', 'dynamic_columns', 'twenty_rows_composite_table', 'utf8_with_special_chars', 'system_traces.', 'songs', 'system_distributed.', self.cqlsh.keyspace + '.'], other_choices_ok=True) self.trycompletions('DESC TYPE ', choices=['system.', 'system_auth.', 'system_traces.', 'system_distributed.', 'address', 
'phone_number', 'band_info_type', 'tags'], other_choices_ok=True) self.trycompletions('DESC FUNCTION ', choices=['system.', 'system_auth.', 'system_traces.', 'system_distributed.', 'fbestband', 'fbestsong', 'fmax', 'fmin', self.cqlsh.keyspace + '.'], other_choices_ok=True) self.trycompletions('DESC AGGREGATE ', choices=['system.', 'system_auth.', 'system_traces.', 'system_distributed.', 'aggmin', 'aggmax', self.cqlsh.keyspace + '.'], other_choices_ok=True) # Unfortunately these commented tests will not work. This is due to the keyspace name containing quotes; # cqlsh auto-completes a DESC differently when the keyspace contains quotes. I'll leave the # test here though in case we ever change this script to test using keyspace names without # quotes # self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '"', immediate='.') self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '".', choices=['twenty_rows_table', 'ascii_with_special_chars', 'users', 'has_all_types', 'empty_composite_table', 'empty_table', 'undefined_values_table', 'dynamic_columns', 'twenty_rows_composite_table', 'utf8_with_special_chars', 'songs'], other_choices_ok=True) # See comment above for DESC TABLE # self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '"', immediate='.') self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '".', choices=['address', 'phone_number', 'band_info_type', 'tags'], other_choices_ok=True) # See comment above for DESC TABLE # self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '"', immediate='.f') self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".', immediate='f') self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".f', choices=['fbestband', 'fbestsong', 'fmax', 'fmin'], other_choices_ok=True) # See comment above for DESC TABLE # self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '"', immediate='.aggm') self.trycompletions('DESC AGGREGATE ' + '"' + 
self.cqlsh.keyspace + '".', immediate='aggm') self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".aggm', choices=['aggmin', 'aggmax'], other_choices_ok=True) def test_complete_in_drop_table(self): self.trycompletions('DROP T', choices=['TABLE', 'TRIGGER', 'TYPE']) self.trycompletions('DROP TA', immediate='BLE ') def test_complete_in_truncate(self): self.trycompletions('TR', choices=['TRACING', 'TRUNCATE']) self.trycompletions('TRU', immediate='NCATE ') self.trycompletions('TRUNCATE T', choices=['TABLE', 'twenty_rows_composite_table', 'twenty_rows_table']) def test_complete_in_use(self): self.trycompletions('US', immediate='E ') self.trycompletions('USE ', choices=[self.cqlsh.keyspace, 'system', 'system_auth', 'system_distributed', 'system_schema', 'system_traces', 'system_views', 'system_virtual_schema' ]) def test_complete_in_create_index(self): self.trycompletions('CREATE I', immediate='NDEX ') self.trycompletions('CREATE INDEX ', choices=['<new_index_name>', 'IF', 'ON']) self.trycompletions('CREATE INDEX example ', immediate='ON ') def test_complete_in_drop_index(self): self.trycompletions('DROP I', immediate='NDEX ') def test_complete_in_alter_keyspace(self): self.trycompletions('ALTER KEY', 'SPACE ') self.trycompletions('ALTER KEYSPACE ', '', choices=[self.cqlsh.keyspace, 'system_auth', 'system_distributed', 'system_traces', 'IF']) self.trycompletions('ALTER KEYSPACE I', immediate='F EXISTS ') self.trycompletions('ALTER KEYSPACE system_trac', "es WITH replication = {'class': '") self.trycompletions("ALTER KEYSPACE system_traces WITH replication = {'class': '", '', choices=['NetworkTopologyStrategy', 'SimpleStrategy']) def test_complete_in_grant(self): self.trycompletions("GR", immediate='ANT ') self.trycompletions("GRANT ", choices=['ALL', 'ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'MODIFY', 'SELECT'], other_choices_ok=True) self.trycompletions("GRANT MODIFY ", choices=[',', 'ON', 'PERMISSION']) self.trycompletions("GRANT 
MODIFY P", immediate='ERMISSION ') self.trycompletions("GRANT MODIFY PERMISSION ", choices=[',', 'ON']) self.trycompletions("GRANT MODIFY PERMISSION, ", choices=['ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'SELECT']) self.trycompletions("GRANT MODIFY PERMISSION, D", choices=['DESCRIBE', 'DROP']) self.trycompletions("GRANT MODIFY PERMISSION, DR", immediate='OP ') self.trycompletions("GRANT MODIFY PERMISSION, DROP O", immediate='N ') self.trycompletions("GRANT MODIFY, DROP ON ", choices=['ALL', 'KEYSPACE', 'MBEANS', 'ROLE', 'FUNCTION', 'MBEAN', 'TABLE'], other_choices_ok=True) self.trycompletions("GRANT MODIFY, DROP ON ALL ", choices=['KEYSPACES', 'TABLES'], other_choices_ok=True) self.trycompletions("GRANT MODIFY PERMISSION ON KEY", immediate='SPACE ') self.trycompletions("GRANT MODIFY PERMISSION ON KEYSPACE system_tr", immediate='aces TO ') def test_complete_in_revoke(self): self.trycompletions("RE", immediate='VOKE ') self.trycompletions("REVOKE ", choices=['ALL', 'ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'MODIFY', 'SELECT'], other_choices_ok=True) self.trycompletions("REVOKE MODIFY ", choices=[',', 'ON', 'PERMISSION']) self.trycompletions("REVOKE MODIFY P", immediate='ERMISSION ') self.trycompletions("REVOKE MODIFY PERMISSION ", choices=[',', 'ON']) self.trycompletions("REVOKE MODIFY PERMISSION, ", choices=['ALTER', 'AUTHORIZE', 'CREATE', 'DESCRIBE', 'DROP', 'EXECUTE', 'SELECT']) self.trycompletions("REVOKE MODIFY PERMISSION, D", choices=['DESCRIBE', 'DROP']) self.trycompletions("REVOKE MODIFY PERMISSION, DR", immediate='OP ') self.trycompletions("REVOKE MODIFY PERMISSION, DROP ", choices=[',', 'ON', 'PERMISSION']) self.trycompletions("REVOKE MODIFY PERMISSION, DROP O", immediate='N ') self.trycompletions("REVOKE MODIFY PERMISSION, DROP ON ", choices=['ALL', 'KEYSPACE', 'MBEANS', 'ROLE', 'FUNCTION', 'MBEAN', 'TABLE'], other_choices_ok=True) self.trycompletions("REVOKE MODIFY, DROP ON ALL ", choices=['KEYSPACES', 'TABLES'], 
other_choices_ok=True) self.trycompletions("REVOKE MODIFY PERMISSION, DROP ON KEY", immediate='SPACE ') self.trycompletions("REVOKE MODIFY PERMISSION, DROP ON KEYSPACE system_tr", immediate='aces FROM ') def test_complete_in_alter_table(self): self.trycompletions('ALTER TABLE I', immediate='F EXISTS ') self.trycompletions('ALTER TABLE IF', immediate=' EXISTS ') self.trycompletions('ALTER TABLE ', choices=['IF', 'twenty_rows_table', 'ascii_with_special_chars', 'users', 'has_all_types', 'system.', 'empty_composite_table', 'empty_table', 'system_auth.', 'undefined_values_table', 'dynamic_columns', 'twenty_rows_composite_table', 'utf8_with_special_chars', 'system_traces.', 'songs', 'system_views.', 'system_virtual_schema.', 'system_schema.', 'system_distributed.', self.cqlsh.keyspace + '.']) self.trycompletions('ALTER TABLE IF EXISTS new_table ADD ', choices=['<new_column_name>', 'IF']) self.trycompletions('ALTER TABLE IF EXISTS new_table ADD IF NOT EXISTS ', choices=['<new_column_name>']) self.trycompletions('ALTER TABLE new_table ADD IF NOT EXISTS ', choices=['<new_column_name>']) self.trycompletions('ALTER TABLE IF EXISTS new_table RENAME ', choices=['IF', '<quotedName>', '<identifier>']) self.trycompletions('ALTER TABLE new_table RENAME ', choices=['IF', '<quotedName>', '<identifier>']) self.trycompletions('ALTER TABLE IF EXISTS new_table DROP ', choices=['IF', '<quotedName>', '<identifier>']) def test_complete_in_alter_type(self): self.trycompletions('ALTER TYPE I', immediate='F EXISTS ') self.trycompletions('ALTER TYPE ', choices=['IF', 'system_views.', 'tags', 'system_traces.', 'system_distributed.', 'phone_number', 'band_info_type', 'address', 'system.', 'system_schema.', 'system_auth.', 'system_virtual_schema.', self.cqlsh.keyspace + '.' 
]) self.trycompletions('ALTER TYPE IF EXISTS new_type ADD ', choices=['<new_field_name>', 'IF']) self.trycompletions('ALTER TYPE IF EXISTS new_type ADD IF NOT EXISTS ', choices=['<new_field_name>']) self.trycompletions('ALTER TYPE IF EXISTS new_type RENAME ', choices=['IF', '<quotedName>', '<identifier>']) def test_complete_in_alter_user(self): self.trycompletions('ALTER USER ', choices=['<identifier>', 'IF', '<pgStringLiteral>', '<quotedStringLiteral>']) def test_complete_in_create_role(self): self.trycompletions('CREATE ROLE ', choices=['<identifier>', 'IF', '<quotedName>']) self.trycompletions('CREATE ROLE IF ', immediate='NOT EXISTS '); self.trycompletions('CREATE ROLE foo WITH ', choices=['ACCESS', 'HASHED', 'LOGIN', 'OPTIONS', 'PASSWORD', 'SUPERUSER']) self.trycompletions('CREATE ROLE foo WITH HASHED ', immediate='PASSWORD = '); self.trycompletions('CREATE ROLE foo WITH ACCESS TO ', choices=['ALL', 'DATACENTERS']) self.trycompletions('CREATE ROLE foo WITH ACCESS TO ALL ', immediate='DATACENTERS ') def test_complete_in_alter_role(self): self.trycompletions('ALTER ROLE ', choices=['<identifier>', 'IF', '<quotedName>']) self.trycompletions('ALTER ROLE foo ', immediate='WITH ') self.trycompletions('ALTER ROLE foo WITH ', choices=['ACCESS', 'HASHED', 'LOGIN', 'OPTIONS', 'PASSWORD', 'SUPERUSER']) self.trycompletions('ALTER ROLE foo WITH ACCESS TO ', choices=['ALL', 'DATACENTERS']) def test_complete_in_drop_role(self): self.trycompletions('DROP ROLE ', choices=['<identifier>', 'IF', '<quotedName>']) def test_complete_in_list(self): self.trycompletions('LIST ', choices=['ALL', 'AUTHORIZE', 'DESCRIBE', 'EXECUTE', 'ROLES', 'USERS', 'ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT']) # Non-CQL Shell Commands def test_complete_in_capture(self): self.trycompletions('CAPTURE ', choices=['OFF', ';', '<enter>'], other_choices_ok=True) def test_complete_in_paging(self): self.trycompletions('PAGING ', choices=['ON', 'OFF', ';', '<enter>', '<wholenumber>' ] ) 
self.trycompletions('PAGING 50 ', choices=[';', '<enter>' ] ) def test_complete_in_serial(self): self.trycompletions('SERIAL CONSISTENCY ', choices=[';', '<enter>', 'LOCAL_SERIAL', 'SERIAL']) def test_complete_in_show(self): self.trycompletions('SHOW ', choices=['HOST', 'REPLICAS', 'SESSION', 'VERSION']) self.trycompletions('SHOW SESSION ', choices=['<uuid>']) self.trycompletions('SHOW REPLICAS ', choices=['-', '<wholenumber>']) def test_complete_in_tracing(self): self.trycompletions('TRACING ', choices=[';', '<enter>', 'OFF', 'ON'])
{ "content_hash": "43ac377ec22b3386b935f3e71c2ba1ef", "timestamp": "", "source": "github", "line_count": 1001, "max_line_length": 157, "avg_line_length": 55.83016983016983, "alnum_prop": 0.49885481158071787, "repo_name": "jrwest/cassandra", "id": "c52624b1ed9a0bc9826c98f88d5bb88fea52df52", "size": "56792", "binary": false, "copies": "2", "ref": "refs/heads/trunk", "path": "pylib/cqlshlib/test/test_cqlsh_completion.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "AMPL", "bytes": "801" }, { "name": "GAP", "bytes": "91752" }, { "name": "HTML", "bytes": "265026" }, { "name": "Java", "bytes": "34039812" }, { "name": "Lex", "bytes": "10152" }, { "name": "Python", "bytes": "562130" }, { "name": "Shell", "bytes": "120197" } ], "symlink_target": "" }
from temboo.core.choreography import (Choreography, ChoreographyExecution,
                                      InputSet, ResultSet)

import json


class GetReference(Choreography):
    """Choreo wrapper for the GitHub GitData 'GetReference' endpoint."""

    def __init__(self, temboo_session):
        """Create a new instance of the GetReference Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied."""
        choreo_path = '/Library/GitHub/GitDataAPI/References/GetReference'
        super(GetReference, self).__init__(temboo_session, choreo_path)

    def new_input_set(self):
        return GetReferenceInputSet()

    def _make_result_set(self, result, path):
        return GetReferenceResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return GetReferenceChoreographyExecution(session, exec_id, path)


class GetReferenceInputSet(InputSet):
    """Input parameters for the GetReference Choreo.

    Use the setters below to supply inputs before executing the Choreo.
    """

    def set_AccessToken(self, value):
        """AccessToken -- (conditional, string) OAuth Access Token; required when accessing a protected resource."""
        super(GetReferenceInputSet, self)._set_input('AccessToken', value)

    def set_Ref(self, value):
        """Ref -- (required, string) Reference to retrieve, formatted as refs/heads/branch (see GetAllReferences for available refs)."""
        super(GetReferenceInputSet, self)._set_input('Ref', value)

    def set_Repo(self, value):
        """Repo -- (required, string) Name of the repo whose reference is retrieved."""
        super(GetReferenceInputSet, self)._set_input('Repo', value)

    def set_User(self, value):
        """User -- (required, string) The GitHub username."""
        super(GetReferenceInputSet, self)._set_input('User', value)


class GetReferenceResultSet(ResultSet):
    """Results returned by a GetReference execution."""

    def getJSONFromString(self, str):
        # NOTE: parameter name 'str' shadows the builtin; kept for
        # interface compatibility with the generated Temboo SDK.
        return json.loads(str)

    def get_Response(self):
        """(json) The response from GitHub."""
        return self._output.get('Response', None)

    def get_Limit(self):
        """(integer) Your account's rate limit, taken from the GitHub response header."""
        return self._output.get('Limit', None)

    def get_Remaining(self):
        """(integer) Remaining API requests available, taken from the GitHub response header."""
        return self._output.get('Remaining', None)


class GetReferenceChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        return GetReferenceResultSet(response, path)
{ "content_hash": "25cae8a41a30a1067c19c51ea602b902", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 230, "avg_line_length": 43.7037037037037, "alnum_prop": 0.690677966101695, "repo_name": "jordanemedlock/psychtruths", "id": "f99efa7267fd60f6f8c93fb0b70f150c418d9daa", "size": "4476", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "temboo/Library/GitHub/GitDataAPI/References/GetReference.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "18544" }, { "name": "HTML", "bytes": "34650" }, { "name": "JavaScript", "bytes": "423" }, { "name": "PHP", "bytes": "1097" }, { "name": "Python", "bytes": "23444578" } ], "symlink_target": "" }
import datetime, logging
import platform

# Hostname of the machine running this process; stamped onto every DB row and
# email subject so multi-host deployments can tell log sources apart.
HOST = platform.uname()[1]


class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""

    def emit(self, record):
        pass


class MockHandler(logging.Handler):
    """Collects emitted records in ``self.msgs`` instead of outputting them.

    Intended for tests that need to assert on what was logged.
    """

    def __init__(self, *args, **kwargs):
        self.msgs = []
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        self.msgs.append(record)


class DatabaseHandler(logging.Handler):
    """Persists log records to the jogging ``Log`` model.

    The Django import lives inside ``emit`` so the module can be imported
    before Django settings are configured.
    """

    def emit(self, record):
        from jogging.models import Log
        # Prefer an explicit ``source`` attribute on the record; fall back to
        # the logger name.
        if hasattr(record, 'source'):
            source = record.source
        else:
            source = record.name
        try:
            Log.objects.create(source=source, level=record.levelname,
                               msg=record.msg, host=HOST)
        except Exception:
            # Squelching exceptions sucks, but 500-ing because of a logging
            # error sucks more.  Narrowed from a bare ``except:`` so that
            # SystemExit/KeyboardInterrupt still propagate.
            pass


class EmailHandler(logging.Handler):
    """Emails each log record via Django's ``send_mail``.

    recipient_spec defaults to ``settings.ADMINS`` (a sequence of
    (name, address) pairs); from_email defaults to ``settings.SERVER_EMAIL``.
    """

    def __init__(self, from_email=None, recipient_spec=None, fail_silently=False,
                 auth_user=None, auth_password=None, *args, **kwargs):
        logging.Handler.__init__(self, *args, **kwargs)
        self.recipient_spec = recipient_spec or ()
        self.from_email = from_email
        self.auth_user = auth_user
        self.auth_password = auth_password
        self.fail_silently = fail_silently

    def emit(self, record):
        from django.conf import settings
        from django.core.mail import send_mail
        if hasattr(record, 'source'):
            source = record.source
        else:
            source = record.name
        send_mail(
            subject="%s[%s] %s: %s" % (settings.EMAIL_SUBJECT_PREFIX, HOST, source, record.levelname.upper()),
            message=record.msg,
            from_email=self.from_email or settings.SERVER_EMAIL,
            recipient_list=[a[1] for a in (self.recipient_spec or settings.ADMINS)],
            fail_silently=self.fail_silently,
            auth_user=self.auth_user,
            auth_password=self.auth_password,
        )
{ "content_hash": "8bb85991d5ab49b75becc3277bb53b1e", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 135, "avg_line_length": 33.559322033898304, "alnum_prop": 0.6075757575757575, "repo_name": "zain/jogging", "id": "7c7d5c84d796958a6621245b506d8af20ba8f08b", "size": "1980", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jogging/handlers.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "16703" } ], "symlink_target": "" }
import os, sys
import time
from test.testlib import *
from git import *

class TestRepo(object):
    """Tests for git.Repo.  Calls to the underlying ``git`` binary are mocked
    by patching ``Git._call_process`` so no real repository access happens.

    Fix: the module previously defined ``test_diff`` twice; the second
    definition silently shadowed the first, so the argument-forwarding
    assertions for ``Repo.diff`` never ran.  The second test (which exercises
    ``Repo.commit_diff``) is now named ``test_commit_diff``.
    """

    def setup(self):
        self.repo = Repo(GIT_REPO)

    @raises(InvalidGitRepositoryError)
    def test_new_should_raise_on_invalid_repo_location(self):
        # A directory that exists but is not a git repository.
        if sys.platform == "win32":
            Repo("C:\\WINDOWS\\Temp")
        else:
            Repo("/tmp")

    @raises(NoSuchPathError)
    def test_new_should_raise_on_non_existant_path(self):
        Repo("repos/foobar")

    def test_description(self):
        txt = "Test repository"
        self.repo.description = txt
        assert_equal(self.repo.description, txt)

    def test_heads_should_return_array_of_head_objects(self):
        for head in self.repo.heads:
            assert_equal(Head, head.__class__)

    @patch(Git, '_call_process')
    def test_heads_should_populate_head_data(self, git):
        git.return_value = fixture('for_each_ref')

        head = self.repo.heads[0]
        assert_equal('master', head.name)
        assert_equal('634396b2f541a9f2d58b00be1a07f0c358b999b3', head.commit.id)

        assert_true(git.called)
        assert_equal(git.call_args, (('for_each_ref', 'refs/heads'),
                                     {'sort': 'committerdate',
                                      'format': '%(refname)%00%(objectname)'}))

    @patch(Git, '_call_process')
    def test_commits(self, git):
        git.return_value = fixture('rev_list')

        commits = self.repo.commits('master', 10)

        c = commits[0]
        assert_equal('4c8124ffcf4039d292442eeccabdeca5af5c5017', c.id)
        assert_equal(["634396b2f541a9f2d58b00be1a07f0c358b999b3"], [p.id for p in c.parents])
        assert_equal("672eca9b7f9e09c22dcb128c283e8c3c8d7697a4", c.tree.id)
        assert_equal("Tom Preston-Werner", c.author.name)
        assert_equal("tom@mojombo.com", c.author.email)
        assert_equal(time.gmtime(1191999972), c.authored_date)
        assert_equal("Tom Preston-Werner", c.committer.name)
        assert_equal("tom@mojombo.com", c.committer.email)
        assert_equal(time.gmtime(1191999972), c.committed_date)
        assert_equal("implement Grit#heads", c.message)

        c = commits[1]
        assert_equal([], c.parents)

        c = commits[2]
        # List comprehension for consistency with the parent check above
        # (and so the comparison also holds where map() returns an iterator).
        assert_equal(["6e64c55896aabb9a7d8e9f8f296f426d21a78c2c",
                      "7f874954efb9ba35210445be456c74e037ba6af2"],
                     [p.id for p in c.parents])
        assert_equal("Merge branch 'site'", c.message)

        assert_true(git.called)
        assert_equal(git.call_args, (('rev_list', 'master'),
                                     {'skip': 0, 'pretty': 'raw', 'max_count': 10}))

    @patch(Git, '_call_process')
    def test_commit_count(self, git):
        git.return_value = fixture('rev_list_count')

        assert_equal(655, self.repo.commit_count('master'))

        assert_true(git.called)
        assert_equal(git.call_args, (('rev_list', 'master'), {}))

    @patch(Git, '_call_process')
    def test_commit(self, git):
        git.return_value = fixture('rev_list_single')

        commit = self.repo.commit('4c8124ffcf4039d292442eeccabdeca5af5c5017')

        assert_equal("4c8124ffcf4039d292442eeccabdeca5af5c5017", commit.id)

        assert_true(git.called)
        assert_equal(git.call_args, (('rev_list', '4c8124ffcf4039d292442eeccabdeca5af5c5017'),
                                     {'pretty': 'raw', 'max_count': 1}))

    @patch(Git, '_call_process')
    def test_tree(self, git):
        git.return_value = fixture('ls_tree_a')

        tree = self.repo.tree('master')

        assert_equal(4, len([c for c in tree.values() if isinstance(c, Blob)]))
        assert_equal(3, len([c for c in tree.values() if isinstance(c, Tree)]))

        assert_true(git.called)
        assert_equal(git.call_args, (('ls_tree', 'master'), {}))

    @patch(Git, '_call_process')
    def test_blob(self, git):
        git.return_value = fixture('cat_file_blob')

        blob = self.repo.blob("abc")
        assert_equal("Hello world", blob.data)

        assert_true(git.called)
        assert_equal(git.call_args, (('cat_file', 'abc'),
                                     {'p': True, 'with_raw_output': True}))

    @patch(Repo, '__init__')
    @patch(Git, '_call_process')
    def test_init_bare(self, repo, git):
        git.return_value = True

        Repo.init_bare("repos/foo/bar.git")

        assert_true(git.called)
        assert_equal(git.call_args, (('init', '--bare'), {}))
        assert_true(repo.called)
        assert_equal(repo.call_args, (('repos/foo/bar.git',), {}))

    @patch(Repo, '__init__')
    @patch(Git, '_call_process')
    def test_init_bare_with_options(self, repo, git):
        git.return_value = True

        Repo.init_bare("repos/foo/bar.git", **{'template': "/baz/sweet"})

        assert_true(git.called)
        assert_equal(git.call_args, (('init', '--bare'), {'template': '/baz/sweet'}))
        assert_true(repo.called)
        assert_equal(repo.call_args, (('repos/foo/bar.git',), {}))

    @patch(Repo, '__init__')
    @patch(Git, '_call_process')
    def test_fork_bare(self, repo, git):
        git.return_value = None

        self.repo.fork_bare("repos/foo/bar.git")

        assert_true(git.called)
        path = os.path.join(absolute_project_path(), '.git')
        assert_equal(git.call_args, (('clone', path, 'repos/foo/bar.git'),
                                     {'bare': True}))
        assert_true(repo.called)

    @patch(Repo, '__init__')
    @patch(Git, '_call_process')
    def test_fork_bare_with_options(self, repo, git):
        git.return_value = None

        self.repo.fork_bare("repos/foo/bar.git", **{'template': '/awesome'})

        assert_true(git.called)
        path = os.path.join(absolute_project_path(), '.git')
        assert_equal(git.call_args, (('clone', path, 'repos/foo/bar.git'),
                                     {'bare': True, 'template': '/awesome'}))
        assert_true(repo.called)

    @patch(Git, '_call_process')
    def test_diff(self, git):
        # Repo.diff must forward commit range and optional paths verbatim.
        self.repo.diff('master^', 'master')

        assert_true(git.called)
        assert_equal(git.call_args, (('diff', 'master^', 'master', '--'), {}))

        self.repo.diff('master^', 'master', 'foo/bar')

        assert_true(git.called)
        assert_equal(git.call_args, (('diff', 'master^', 'master', '--', 'foo/bar'), {}))

        self.repo.diff('master^', 'master', 'foo/bar', 'foo/baz')

        assert_true(git.called)
        assert_equal(git.call_args,
                     (('diff', 'master^', 'master', '--', 'foo/bar', 'foo/baz'), {}))

    @patch(Git, '_call_process')
    def test_commit_diff(self, git):
        # Was a second ``test_diff``; renamed so both tests actually run.
        git.return_value = fixture('diff_p')

        diffs = self.repo.commit_diff('master')
        assert_equal(15, len(diffs))
        assert_true(git.called)

    def test_archive_tar(self):
        self.repo.archive_tar

    def test_archive_tar_gz(self):
        self.repo.archive_tar_gz

    @patch('git.utils', 'touch')
    def test_enable_daemon_serve(self, touch):
        self.repo.daemon_serve = False
        assert_false(self.repo.daemon_serve)

    def test_disable_daemon_serve(self):
        self.repo.daemon_serve = True
        assert_true(self.repo.daemon_serve)

    @patch(os.path, 'exists')
    def test_alternates_no_file(self, exists):
        # Renamed mock param (was ``os``) so it no longer shadows the module.
        exists.return_value = False
        assert_equal([], self.repo.alternates)

        assert_true(exists.called)

    @patch(os, 'remove')
    def test_alternates_setter_empty(self, remove):
        self.repo.alternates = []
        assert_true(remove.called)

    def test_repr(self):
        path = os.path.join(os.path.abspath(GIT_REPO), '.git')
        assert_equal('<git.Repo "%s">' % path, repr(self.repo))

    @patch(Git, '_call_process')
    def test_log(self, git):
        git.return_value = fixture('rev_list')
        assert_equal('4c8124ffcf4039d292442eeccabdeca5af5c5017', self.repo.log()[0].id)
        assert_equal('ab25fd8483882c3bda8a458ad2965d2248654335', self.repo.log()[-1].id)
        assert_true(git.called)
        assert_equal(git.call_count, 2)
        assert_equal(git.call_args, (('log', 'master'), {'pretty': 'raw'}))

    @patch(Git, '_call_process')
    def test_log_with_path_and_options(self, git):
        git.return_value = fixture('rev_list')
        self.repo.log('master', 'file.rb', **{'max_count': 1})
        assert_true(git.called)
        assert_equal(git.call_args, (('log', 'master', '--', 'file.rb'),
                                     {'pretty': 'raw', 'max_count': 1}))

    def test_is_dirty_with_bare_repository(self):
        self.repo.bare = True
        assert_false(self.repo.is_dirty)

    @patch(Git, '_call_process')
    def test_is_dirty_with_clean_working_dir(self, git):
        self.repo.bare = False
        git.return_value = ''
        assert_false(self.repo.is_dirty)
        assert_equal(git.call_args, (('diff', 'HEAD'), {}))

    @patch(Git, '_call_process')
    def test_is_dirty_with_dirty_working_dir(self, git):
        self.repo.bare = False
        git.return_value = '''-aaa\n+bbb'''
        assert_true(self.repo.is_dirty)
        assert_equal(git.call_args, (('diff', 'HEAD'), {}))

    @patch(Git, '_call_process')
    def test_active_branch(self, git):
        git.return_value = 'refs/heads/major-refactoring'
        assert_equal(self.repo.active_branch, 'major-refactoring')
        assert_equal(git.call_args, (('symbolic_ref', 'HEAD'), {}))
{ "content_hash": "504a57422b2c39ba19fb079705facb39", "timestamp": "", "source": "github", "line_count": 320, "max_line_length": 142, "avg_line_length": 38.3, "alnum_prop": 0.5935052219321149, "repo_name": "directeur/git-python", "id": "669a8f62cb75410207229f08f3fa8db519161f51", "size": "12472", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/git/test_repo.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from .exceptions import NoSuchLibException, BadSignatureException, BadEncryptedDataException from .saltlib import SaltLib
{ "content_hash": "63fa6654c874240df76aa7c6aafeaedb", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 92, "avg_line_length": 61, "alnum_prop": 0.8852459016393442, "repo_name": "assaabloy-ppi/salt-channel-python", "id": "1cc89c509b4f07915e32e5ee7ea71aeef733dca9", "size": "122", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "saltchannel/saltlib/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2315" }, { "name": "Python", "bytes": "185637" } ], "symlink_target": "" }
# Package metadata for datastore.objects.
__author__ = 'Juan Batiz-Benet'
__email__ = 'juan@benet.ai'
__version__ = '0.2.3'

__doc__ = '''
`datastore-objects` is a simple *object mapper* on top of
[datastore](https://github.com/jbenet/datastore) (not relational). Thanks to
datastore's versatility, it makes it easy to (serialize and) persist custom
classes to any sort of data storage service.

Notice: please familiarize yourself with `datastore` first.
'''

import datastore.core

# Re-export the public API at package level.
from .util import classproperty
from .attribute_metaclass import AttributeMetaclass
from .attribute_metaclass import DuplicateAttributeError
from .attribute import Attribute
from .model import Key
from .model import Model
from .manager import Manager
from .object_datastore import ObjectDatastore
{ "content_hash": "7e1a1c8eef9b2b44a731baa07752754e", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 76, "avg_line_length": 33.54545454545455, "alnum_prop": 0.7737127371273713, "repo_name": "datastore/datastore.objects", "id": "aa0b4543b73d3972abe603f029d105ba062d319d", "size": "738", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "datastore/objects/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "56395" } ], "symlink_target": "" }
import copy
import re


class Node:
    """Base node of a reverse-linked chain of regex fragments.

    ``next`` points at the fragment that comes *before* this one in the
    final expression, so rendering walks ``next`` first and the subclass
    appends its own text afterwards.
    """

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __str__(self):
        # Render the preceding chain; subclasses append their own fragment.
        if self.next:
            return str(self.next)
        return ""

    def __len__(self):
        # Visible length of the preceding chain.  Subclasses add their own
        # contribution; the result decides whether a fragment needs to be
        # wrapped in a group before a quantifier is applied.
        if self.next:
            return len(self.next)
        return 0


class LiteralNode(Node):
    """A literal (possibly escaped) piece of text."""

    def __str__(self):
        return Node.__str__(self) + str(self.data)

    def __len__(self):
        # Backslashes are escape characters, not visible characters, so
        # they do not count toward the fragment's length.
        return Node.__len__(self) + len(str(self.data).replace('\\', ''))


class RepeatsMinMaxNode(Node):
    """``regex{min,max}`` -- bounded repetition."""

    def __str__(self):
        # BUGFIX: use a local instead of mutating self.data['regex'];
        # previously a second str() call re-wrapped the already-grouped
        # body, producing a different (double-grouped) expression.
        body = self.data['regex']
        if len(body) > 1:
            body = group(body, non_capture=True)
        return Node.__str__(self) + "%s{%d,%d}" % (body, self.data['min'],
                                                   self.data['max'])

    def __len__(self):
        return len(self.data['regex']) + 1


class RepeatsNumNode(Node):
    """``regex{num}`` -- exact repetition."""

    def __str__(self):
        # BUGFIX: local wrapping keeps repeated str() calls idempotent.
        body = self.data['regex']
        if len(body) > 1:
            body = group(body, non_capture=True)
        return Node.__str__(self) + "%s{%d}" % (body, self.data['num'])

    def __len__(self):
        return len(self.data['regex']) + 1


class _QuantifierNode(Node):
    """Shared base for ``+``, ``*`` and ``?``.

    Multi-character bodies are wrapped in a non-capturing group so the
    quantifier applies to the whole fragment, not only its last character.
    """

    _suffix = ''

    def __str__(self):
        # BUGFIX: local wrapping keeps repeated str() calls idempotent
        # (self.data is no longer mutated on render).
        body = self.data
        if len(body) > 1:
            body = group(body, non_capture=True)
        return Node.__str__(self) + "%s%s" % (body, self._suffix)

    def __len__(self):
        return len(self.data) + 1


class OneOrMoreNode(_QuantifierNode):
    """``regex+`` -- one or more repetitions."""
    _suffix = '+'


class ZeroOrMoreNode(_QuantifierNode):
    """``regex*`` -- zero or more repetitions."""
    _suffix = '*'


class OptionalNode(_QuantifierNode):
    """``regex?`` -- the fragment is optional."""
    _suffix = '?'


class GroupNode(Node):
    """``(regex)`` -- capturing group (``(regex?)`` when lazy)."""

    def __str__(self):
        if self.data['lazy']:
            return Node.__str__(self) + "(%(regex)s?)" % self.data
        return Node.__str__(self) + "(%(regex)s)" % self.data

    def __len__(self):
        # NOTE(review): unlike the other nodes this deliberately ignores
        # the preceding chain (no Node.__len__ contribution); grouping
        # decisions elsewhere rely on this, so it is kept as-is.
        return len(self.data['regex'])


class NonCaptureGroupNode(Node):
    """``(?:regex)`` -- non-capturing group (``(?:regex?)`` when lazy)."""

    def __str__(self):
        if self.data['lazy']:
            return Node.__str__(self) + "(?:%(regex)s?)" % self.data
        return Node.__str__(self) + "(?:%(regex)s)" % self.data

    def __len__(self):
        # See GroupNode.__len__: intentionally ignores the preceding chain.
        return len(self.data['regex'])


class RangeNode(Node):
    """``[range]`` -- character class."""

    def __str__(self):
        return Node.__str__(self) + "[%s]" % str(self.data)

    def __len__(self):
        # A character class matches exactly one character.
        return Node.__len__(self) + 1


class InvertedRangeNode(Node):
    """``[^range]`` -- negated character class."""

    def __str__(self):
        return Node.__str__(self) + "[^%s]" % str(self.data)

    def __len__(self):
        return Node.__len__(self) + 1


class OrNode(Node):
    """``lhs|rhs`` -- alternation; multi-character operands are grouped."""

    def __str__(self):
        # BUGFIX: wrap locals instead of mutating self.data, so repeated
        # str() calls (and later __len__ calls) see the original operands.
        lhs = self.data['lhs']
        rhs = self.data['rhs']
        if len(lhs) > 1:
            lhs = group(lhs, non_capture=True)
        if len(rhs) > 1:
            rhs = group(rhs, non_capture=True)
        return Node.__str__(self) + "%s|%s" % (lhs, rhs)

    def __len__(self):
        return max(len(self.data['lhs']), len(self.data['rhs']))


class RegexBuilder:
    """Immutable, chainable regular-expression builder.

    Every builder method returns a deep copy of the builder with one node
    prepended, so partially built expressions can be reused and extended
    independently.  Render the final pattern with ``str(builder)``.
    """

    def __init__(self):
        self.text = ""  # retained for backwards compatibility (unused)
        self.len = 0    # retained for backwards compatibility (unused)
        self.root = Node()
        self.tail = self.root

    def __add_node(self, node):
        # Copy-on-write: never mutate self; the new node becomes the root
        # of the copy's chain and links back to the previous chain.
        obj = copy.deepcopy(self)
        node.next = obj.root
        obj.root = node
        return obj

    def literal(self, lit):
        """
        Adds a text literal to the regular expression.
        This escapes the literal.
        """
        return self.__add_node(LiteralNode(re.escape(lit)))

    def raw(self, lit):
        """
        Adds a text literal to the regular expression.
        This does not escape the literal.
        """
        return self.__add_node(LiteralNode(lit))

    def repeats(self, regex, min, max=None):
        """
        Repeats the passed expression: exactly ``min`` times, or between
        ``min`` and ``max`` times when ``max`` is given.
        """
        if max:
            return self.__add_node(
                RepeatsMinMaxNode({'min': min, 'max': max, 'regex': regex}))
        return self.__add_node(RepeatsNumNode({'num': min, 'regex': regex}))

    def group(self, regex, non_capture=False, lazy=False):
        """
        Groups the passed regex with a backlink (capturing) or without one
        (``non_capture=True``).
        """
        if non_capture:
            return self.__add_node(
                NonCaptureGroupNode({'regex': regex, 'lazy': lazy}))
        return self.__add_node(GroupNode({'regex': regex, 'lazy': lazy}))

    def one_or_more(self, regex):
        """Repeats the passed expression one or more times (``+``)."""
        return self.__add_node(OneOrMoreNode(regex))

    def zero_or_more(self, regex):
        """Repeats the passed expression zero or more times (``*``)."""
        return self.__add_node(ZeroOrMoreNode(regex))

    def optional(self, regex):
        """Makes the passed expression optional (``?``)."""
        return self.__add_node(OptionalNode(regex))

    def range(self, range):
        """Matches any characters in the range (``[...]``)."""
        return self.__add_node(RangeNode(range))

    def inverted_range(self, range):
        """Matches any characters NOT in the range (``[^...]``)."""
        return self.__add_node(InvertedRangeNode(range))

    def alternate(self, lhs, rhs):
        """Matches either the ``lhs`` OR the ``rhs`` expression."""
        return self.__add_node(OrNode({'lhs': lhs, 'rhs': rhs}))

    def append(self, regex):
        """Appends a pre-built expression verbatim."""
        # __add_node already deep-copies the builder; the former extra
        # deepcopy of a freshly constructed node was redundant.
        return self.__add_node(LiteralNode(regex))

    def class_(self, classtype):
        """Appends a character class such as ``\\d`` or ``\\w``."""
        return self.__add_node(LiteralNode('\\' + classtype))

    def to_string(self):
        """DEPRECATED: use str(regex) instead."""
        return str(self)

    def __str__(self):
        return str(self.root)

    def __len__(self):
        return len(self.root)


# Module-level convenience constructors: each starts a fresh builder chain.

def literal(lit):
    """Adds an escaped text literal to the regular expression."""
    return RegexBuilder().literal(lit)


def raw(lit):
    """Adds an unescaped text literal to the regular expression."""
    return RegexBuilder().raw(lit)


def class_(lit):
    """Adds a character class (e.g. ``\\d``) to the regular expression."""
    return RegexBuilder().class_(lit)


def repeats(regex, min=None, max=None):
    """
    Repeats the passed expression n times.
    NOTE: ``min`` is effectively required; it defaults to None only for
    backwards compatibility with existing callers.
    """
    return RegexBuilder().repeats(regex, min, max)


def group(regex, non_capture=False, lazy=False):
    """Groups the passed regex with a backlink."""
    return RegexBuilder().group(regex, non_capture, lazy)


def one_or_more(regex):
    """Repeats the passed expression one or more times."""
    return RegexBuilder().one_or_more(regex)


def zero_or_more(regex):
    """Repeats the passed expression zero or more times."""
    return RegexBuilder().zero_or_more(regex)


def optional(regex):
    """The passed expression is optional."""
    return RegexBuilder().optional(regex)


def range(lit):  # noqa: shadows builtins.range; name kept for API compat
    """Matches any characters in the range."""
    return RegexBuilder().range(lit)


def inverted_range(lit):
    """Matches any characters NOT in the range."""
    return RegexBuilder().inverted_range(lit)


def alternate(lhs, rhs):
    """Matches either the lhs OR the rhs expressions."""
    return RegexBuilder().alternate(lhs, rhs)
{ "content_hash": "862708ced519c806bcb05cba64c36e7c", "timestamp": "", "source": "github", "line_count": 219, "max_line_length": 95, "avg_line_length": 32.31963470319635, "alnum_prop": 0.5812376377507771, "repo_name": "bruntonspall/regex-builder", "id": "b9ab33f8e277e8010480b4c243b41258881c6c83", "size": "7078", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "builder.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "16919" } ], "symlink_target": "" }
"""Unit tests for the project's custom user model creation helpers."""
import pytest
from django.contrib.auth import get_user_model

# Every test in this module needs database access.
pytestmark = pytest.mark.django_db

User = get_user_model()


def test_create_user():
    """A regular user is active but has no staff/superuser privileges."""
    user = User.objects.create_user(email='f@example.com', password='abc', name="test user")
    assert user.email == 'f@example.com'
    assert str(user) == str(user.email)
    assert user.get_short_name() == 'test user'
    assert user.is_active is True
    assert user.is_staff is False
    assert user.is_superuser is False


def test_create_user_with_unusable_password():
    """Omitting the password yields an account that cannot log in."""
    user = User.objects.create_user(email='f@example.com')
    assert user.has_usable_password() is False


def test_create_super_user():
    """A superuser is active and carries both staff and superuser flags."""
    user = User.objects.create_superuser(email='f@example.com', password='abc')
    assert str(user) == str(user.email)
    assert user.is_active is True
    assert user.is_staff is True
    assert user.is_superuser is True
{ "content_hash": "7f53256da19bed5c689744d30064314b", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 89, "avg_line_length": 29.928571428571427, "alnum_prop": 0.6861575178997613, "repo_name": "akarambir/askcoding", "id": "cd57d166ada523fcc091bf831ee5d8a18ef57be9", "size": "877", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "askcoding/users/tests/test_models.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1800" }, { "name": "Python", "bytes": "26815" } ], "symlink_target": "" }
__author__ = 'Sergey Sobko'
__email__ = 'S.Sobko@profitware.ru'
__copyright__ = 'Copyright 2015, The Profitware Group'

from scapy.packet import Packet, bind_layers
from scapy.layers.l2 import Ether


class PTPv2(Packet):
    """PTPv2 Protocol.

    Placeholder dissector: the packet currently declares no fields, so
    scapy will treat the payload as opaque until fields are implemented.
    """
    # FIXME: Implement fields.
    name = 'PTPv2'  # display name used by scapy when showing the layer


# Dissect PTPv2 as the payload of Ethernet frames carrying EtherType 0x88f7
# (PTP transported directly over IEEE 802.3 Ethernet).
bind_layers(Ether, PTPv2, type=0x88f7)
{ "content_hash": "86791f819f4e970e5a59eecc6969a7ad", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 54, "avg_line_length": 20.058823529411764, "alnum_prop": 0.6832844574780058, "repo_name": "profitware/iec61850", "id": "7d75640cb473589cca269fe980ccd45851028864", "size": "388", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "iec61850/protocol_ptpv2.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "4822" } ], "symlink_target": "" }
import pyaf.Bench.TS_datasets as tsds

import tests.artificial.process_artificial_dataset as art


# Generate and process one artificial daily ('D') series of 128 points with a
# linear trend and a 30-step cycle, applying the Anscombe transform; no noise
# (sigma=0.0), no exogenous variables and no autoregressive component.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 0);
{ "content_hash": "64bc49ba050ea8743ef0c39b834a3a6e", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 165, "avg_line_length": 37.857142857142854, "alnum_prop": 0.7056603773584905, "repo_name": "antoinecarme/pyaf", "id": "35d8b82edaef19ec9b8da17cd94ba5c8be7399c7", "size": "265", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/artificial/transf_Anscombe/trend_LinearTrend/cycle_30/ar_/test_artificial_128_Anscombe_LinearTrend_30__0.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "6773299" }, { "name": "Procfile", "bytes": "24" }, { "name": "Python", "bytes": "54209093" }, { "name": "R", "bytes": "807" }, { "name": "Shell", "bytes": "3619" } ], "symlink_target": "" }
import logging
import traceback

import networkx as nx

from robograph.datamodel.base import exceptions

logging.basicConfig(level=logging.ERROR)
console = logging.getLogger(__name__)


class Graph:
    """
    A graph, composed of nodes and edges.
    Leverages the Networkx library to efficiently store graph components
    and perform graph traversals.

    NOTE(review): this class uses the NetworkX 1.x API for
    ``topological_sort(..., reverse=True)`` and ``predecessors`` -- confirm
    the pinned networkx version before upgrading.
    """

    def __init__(self, name, nodes=None):
        """
        :param name: name of this graph
        :param nodes: optional collection of datamodel.base.node.Node
          instances to add immediately
        """
        self._name = name
        self._nxgraph = nx.DiGraph()
        if nodes is not None:
            self.add_nodes(nodes)

    @property
    def root_node(self):
        """
        Gives the root node of this graph.
        :return: datamodel.base.node.Node instance
        """
        # Reverse topological order puts leaves first, so the root is last.
        return nx.topological_sort(self._nxgraph, reverse=True)[-1]

    @property
    def nodes(self):
        """
        Returns a list of nodes in this graph
        :return: list
        """
        return self._nxgraph.nodes()

    @property
    def edges(self):
        """
        Returns a list of dicts, each one describing a edge of the graph
        :return: list
        """
        nx_edges = self._nxgraph.edges(data=True)
        result = []
        for e in nx_edges:
            result.append(dict(node_from=e[0], node_to=e[1],
                               output_label=e[2]['name']))
        return result

    @property
    def nxgraph(self):
        """The underlying networkx DiGraph instance."""
        return self._nxgraph

    @property
    def name(self):
        """The name given to this graph at construction time."""
        return self._name

    def add_node(self, node):
        """
        Adds the specified node to the graph
        :param node: datamodel.base.node.Node instance
        :return: None
        """
        self._nxgraph.add_node(node)

    def add_nodes(self, sequence_of_nodes):
        """
        Adds the specified collection of nodes to the graph
        :param sequence_of_nodes: collections of datamodel.base.node.Node
          instances
        :return: None
        """
        for node in sequence_of_nodes:
            self.add_node(node)

    def remove_node(self, node):
        """
        Removes the specified node from the graph
        :param node: a datamodel.base.node.Node instance
        :return: None
        """
        if not self._nxgraph.has_node(node):
            raise exceptions.NodeDeletionError('Graph does not contain this node')
        self._nxgraph.remove_node(node)

    def remove_nodes(self, sequence_of_nodes):
        """
        Removes the specified collection of nodes from the graph
        :param sequence_of_nodes: collections of datamodel.base.node.Node
          instances
        :return: None
        """
        for node in sequence_of_nodes:
            self.remove_node(node)

    def connect(self, node_from, node_to, output_label):
        """
        Connects node_from to node_to on the underlying graph model and
        states that the output of node_from will be injected as labeled
        input into node_to using the specified label.
        :param node_from: datamodel.base.Node instance
        :param node_to: datamodel.base.Node instance
        :param output_label: str
        :return: None
        """
        if not self._nxgraph.has_node(node_from):
            raise exceptions.NodeConnectionError('Graph does not contain '
                                                 'node_from: %s' % (node_from,))
        if not self._nxgraph.has_node(node_to):
            raise exceptions.NodeConnectionError('Graph does not contain '
                                                 'node_to: %s' % (node_to,))
        node_to.set_output_label(output_label)
        self._nxgraph.add_edge(node_from, node_to, name=output_label)

    def has_isles(self):
        """
        Tells if the graph has subgraphs. If so, it means that the graph
        has at least one node that is "isolated" from the bigger graph
        component.
        :return: bool
        """
        # BUGFIX: wrap in list() -- on NetworkX 2.x isolates() returns a
        # generator, which has no len(). On 1.x this is a harmless no-op.
        return len(list(nx.isolates(self._nxgraph))) != 0

    def execute(self, result_label="result"):
        """
        Starts from the leaf nodes, calculates their outputs and feeds
        them as inputs to their parent ones. The loop stops once the root
        node is reached. Optionally, you can assign a custom label to the
        output of the root node.
        :param result_label: str (optional)
        :return:
        """
        # Cannot execute graphs with isles
        if self.has_isles():
            raise exceptions.GraphExecutionError("Cannot execute graphs with "
                                                 "isolated nodes")

        # Sort post-order (leaf nodes before, root node at then end)
        ordered_nodes = nx.topological_sort(self._nxgraph, reverse=True)

        # Assign a label to the output of the very last node to be executed:
        # the root node!
        self.root_node.set_output_label(result_label)

        # Output of node N is input for its parent
        try:
            for n in ordered_nodes:
                output = n.execute()
                predecessors = self._nxgraph.predecessors(n)
                if not predecessors:
                    # No parents: n is the root node, we are done.
                    return output
                for parent in predecessors:
                    parent.input(output)
        except exceptions.StopGraphExecutionSignal as e:
            # BUGFIX: str(e) instead of e.message -- BaseException has no
            # .message attribute on Python 3.
            console.info(str(e))
            return None
        except Exception as e:
            console.error(traceback.format_exc())
            raise exceptions.GraphExecutionError(str(e))

    def reset(self):
        """
        Resets all the nodes in the current graph, thus making the graph
        ready for a new execution
        :return: None
        """
        for n in self._nxgraph.nodes():
            n.reset()

    def __repr__(self):
        return '<graph: %s instance of: %s>' % (self._name or '',
                                                str(self.__class__))

    def __len__(self):
        # Number of nodes, regardless of how many edges connect them.
        return self._nxgraph.number_of_nodes()
{ "content_hash": "e69cf4796a921bf69285b9e9f9cae0e3", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 83, "avg_line_length": 33.84393063583815, "alnum_prop": 0.5771135781383433, "repo_name": "csparpa/robograph", "id": "398304336520a8c504bb70d74b46d2f6f4c29f5b", "size": "5855", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "robograph/datamodel/base/graph.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "99989" }, { "name": "Shell", "bytes": "53" } ], "symlink_target": "" }
"""decoders utilities.""" import functools from typing import Dict, Optional import chex from clrs._src import probing from clrs._src import specs import haiku as hk import jax import jax.numpy as jnp _Array = chex.Array _DataPoint = probing.DataPoint _Location = specs.Location _Spec = specs.Spec _Stage = specs.Stage _Type = specs.Type def log_sinkhorn(x: _Array, steps: int, temperature: float, zero_diagonal: bool, noise_rng_key: Optional[_Array]) -> _Array: """Sinkhorn operator in log space, to postprocess permutation pointer logits. Args: x: input of shape [..., n, n], a batch of square matrices. steps: number of iterations. temperature: temperature parameter (as temperature approaches zero, the output approaches a permutation matrix). zero_diagonal: whether to force the diagonal logits towards -inf. noise_rng_key: key to add Gumbel noise. Returns: Elementwise logarithm of a doubly-stochastic matrix (a matrix with non-negative elements whose rows and columns sum to 1). """ assert x.ndim >= 2 assert x.shape[-1] == x.shape[-2] if noise_rng_key is not None: # Add standard Gumbel noise (see https://arxiv.org/abs/1802.08665) noise = -jnp.log(-jnp.log(jax.random.uniform(noise_rng_key, x.shape) + 1e-12) + 1e-12) x = x + noise x /= temperature if zero_diagonal: x = x - 1e6 * jnp.eye(x.shape[-1]) for _ in range(steps): x = jax.nn.log_softmax(x, axis=-1) x = jax.nn.log_softmax(x, axis=-2) return x def construct_decoders(loc: str, t: str, hidden_dim: int, nb_dims: int, name: str): """Constructs decoders.""" linear = functools.partial(hk.Linear, name=f"{name}_dec_linear") if loc == _Location.NODE: # Node decoders. if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: decoders = (linear(1),) elif t == _Type.CATEGORICAL: decoders = (linear(nb_dims),) elif t in [_Type.POINTER, _Type.PERMUTATION_POINTER]: decoders = (linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(1)) else: raise ValueError(f"Invalid Type {t}") elif loc == _Location.EDGE: # Edge decoders. 
if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: decoders = (linear(1), linear(1), linear(1)) elif t == _Type.CATEGORICAL: decoders = (linear(nb_dims), linear(nb_dims), linear(nb_dims)) elif t == _Type.POINTER: decoders = (linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(1)) else: raise ValueError(f"Invalid Type {t}") elif loc == _Location.GRAPH: # Graph decoders. if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: decoders = (linear(1), linear(1)) elif t == _Type.CATEGORICAL: decoders = (linear(nb_dims), linear(nb_dims)) elif t == _Type.POINTER: decoders = (linear(1), linear(1), linear(1)) else: raise ValueError(f"Invalid Type {t}") else: raise ValueError(f"Invalid Location {loc}") return decoders def construct_diff_decoders(name: str): """Constructs diff decoders.""" linear = functools.partial(hk.Linear, name=f"{name}_diffdec_linear") decoders = {} decoders[_Location.NODE] = linear(1) decoders[_Location.EDGE] = (linear(1), linear(1), linear(1)) decoders[_Location.GRAPH] = (linear(1), linear(1)) return decoders def postprocess(spec: _Spec, preds: Dict[str, _Array], sinkhorn_temperature: float, sinkhorn_steps: int, hard: bool) -> Dict[str, _DataPoint]: """Postprocesses decoder output. This is done on outputs in order to score performance, and on hints in order to score them but also in order to feed them back to the model. At scoring time, the postprocessing mode is "hard", logits will be arg-maxed and masks will be thresholded. However, for the case of the hints that are fed back in the model, the postprocessing can be hard or soft, depending on whether we want to let gradients flow through them or not. Args: spec: The spec of the algorithm whose outputs/hints we are postprocessing. preds: Output and/or hint predictions, as produced by decoders. sinkhorn_temperature: Parameter for the sinkhorn operator on permutation pointers. sinkhorn_steps: Parameter for the sinkhorn operator on permutation pointers. 
hard: whether to do hard postprocessing, which involves argmax for MASK_ONE, CATEGORICAL and POINTERS, thresholding for MASK, and stop gradient through for SCALAR. If False, soft postprocessing will be used, with softmax, sigmoid and gradients allowed. Returns: The postprocessed `preds`. In "soft" post-processing, POINTER types will change to SOFT_POINTER, so encoders know they do not need to be pre-processed before feeding them back in. """ result = {} for name in preds.keys(): _, loc, t = spec[name] new_t = t data = preds[name] if t == _Type.SCALAR: if hard: data = jax.lax.stop_gradient(data) elif t == _Type.MASK: if hard: data = (data > 0.0) * 1.0 else: data = jax.nn.sigmoid(data) elif t in [_Type.MASK_ONE, _Type.CATEGORICAL]: cat_size = data.shape[-1] if hard: best = jnp.argmax(data, -1) data = hk.one_hot(best, cat_size) else: data = jax.nn.softmax(data, axis=-1) elif t == _Type.POINTER: if hard: data = jnp.argmax(data, -1).astype(float) else: data = jax.nn.softmax(data, -1) new_t = _Type.SOFT_POINTER elif t == _Type.PERMUTATION_POINTER: # Convert the matrix of logits to a doubly stochastic matrix. 
data = log_sinkhorn( x=data, steps=sinkhorn_steps, temperature=sinkhorn_temperature, zero_diagonal=True, noise_rng_key=None) data = jnp.exp(data) if hard: data = jax.nn.one_hot(jnp.argmax(data, axis=-1), data.shape[-1]) else: raise ValueError("Invalid type") result[name] = probing.DataPoint( name=name, location=loc, type_=new_t, data=data) return result def decode_fts( decoders, spec: _Spec, h_t: _Array, adj_mat: _Array, edge_fts: _Array, graph_fts: _Array, inf_bias: bool, inf_bias_edge: bool, repred: bool, ): """Decodes node, edge and graph features.""" output_preds = {} hint_preds = {} for name in decoders: decoder = decoders[name] stage, loc, t = spec[name] if loc == _Location.NODE: preds = _decode_node_fts(decoder, t, h_t, edge_fts, adj_mat, inf_bias, repred) elif loc == _Location.EDGE: preds = _decode_edge_fts(decoder, t, h_t, edge_fts, adj_mat, inf_bias_edge) elif loc == _Location.GRAPH: preds = _decode_graph_fts(decoder, t, h_t, graph_fts) else: raise ValueError("Invalid output type") if stage == _Stage.OUTPUT: output_preds[name] = preds elif stage == _Stage.HINT: hint_preds[name] = preds else: raise ValueError(f"Found unexpected decoder {name}") return hint_preds, output_preds def _decode_node_fts(decoders, t: str, h_t: _Array, edge_fts: _Array, adj_mat: _Array, inf_bias: bool, repred: bool) -> _Array: """Decodes node features.""" if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: preds = jnp.squeeze(decoders[0](h_t), -1) elif t == _Type.CATEGORICAL: preds = decoders[0](h_t) elif t in [_Type.POINTER, _Type.PERMUTATION_POINTER]: p_1 = decoders[0](h_t) p_2 = decoders[1](h_t) p_3 = decoders[2](edge_fts) p_e = jnp.expand_dims(p_2, -2) + p_3 p_m = jnp.maximum(jnp.expand_dims(p_1, -2), jnp.transpose(p_e, (0, 2, 1, 3))) preds = jnp.squeeze(decoders[3](p_m), -1) if inf_bias: per_batch_min = jnp.min(preds, axis=range(1, preds.ndim), keepdims=True) preds = jnp.where(adj_mat > 0.5, preds, jnp.minimum(-1.0, per_batch_min - 1.0)) if t == _Type.PERMUTATION_POINTER: if 
repred: # testing or validation, no Gumbel noise preds = log_sinkhorn( x=preds, steps=10, temperature=0.1, zero_diagonal=True, noise_rng_key=None) else: # training, add Gumbel noise preds = log_sinkhorn( x=preds, steps=10, temperature=0.1, zero_diagonal=True, noise_rng_key=hk.next_rng_key()) else: raise ValueError("Invalid output type") return preds def _decode_edge_fts(decoders, t: str, h_t: _Array, edge_fts: _Array, adj_mat: _Array, inf_bias_edge: bool) -> _Array: """Decodes edge features.""" pred_1 = decoders[0](h_t) pred_2 = decoders[1](h_t) pred_e = decoders[2](edge_fts) pred = (jnp.expand_dims(pred_1, -2) + jnp.expand_dims(pred_2, -3) + pred_e) if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: preds = jnp.squeeze(pred, -1) elif t == _Type.CATEGORICAL: preds = pred elif t == _Type.POINTER: pred_2 = decoders[3](h_t) p_m = jnp.maximum(jnp.expand_dims(pred, -2), jnp.expand_dims( jnp.expand_dims(pred_2, -3), -3)) preds = jnp.squeeze(decoders[4](p_m), -1) else: raise ValueError("Invalid output type") if inf_bias_edge and t in [_Type.MASK, _Type.MASK_ONE]: per_batch_min = jnp.min(preds, axis=range(1, preds.ndim), keepdims=True) preds = jnp.where(adj_mat > 0.5, preds, jnp.minimum(-1.0, per_batch_min - 1.0)) return preds def _decode_graph_fts(decoders, t: str, h_t: _Array, graph_fts: _Array) -> _Array: """Decodes graph features.""" gr_emb = jnp.max(h_t, axis=-2) pred_n = decoders[0](gr_emb) pred_g = decoders[1](graph_fts) pred = pred_n + pred_g if t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]: preds = jnp.squeeze(pred, -1) elif t == _Type.CATEGORICAL: preds = pred elif t == _Type.POINTER: pred_2 = decoders[2](h_t) ptr_p = jnp.expand_dims(pred, 1) + jnp.transpose(pred_2, (0, 2, 1)) preds = jnp.squeeze(ptr_p, 1) else: raise ValueError("Invalid output type") return preds def maybe_decode_diffs( diff_decoders, h_t: _Array, edge_fts: _Array, graph_fts: _Array, decode_diffs: bool, ) -> Optional[Dict[str, _Array]]: """Optionally decodes node, edge and graph diffs.""" if 
decode_diffs: preds = {} node = _Location.NODE edge = _Location.EDGE graph = _Location.GRAPH preds[node] = _decode_node_diffs(diff_decoders[node], h_t) preds[edge] = _decode_edge_diffs(diff_decoders[edge], h_t, edge_fts) preds[graph] = _decode_graph_diffs(diff_decoders[graph], h_t, graph_fts) else: preds = None return preds def _decode_node_diffs(decoders, h_t: _Array) -> _Array: """Decodes node diffs.""" return jnp.squeeze(decoders(h_t), -1) def _decode_edge_diffs(decoders, h_t: _Array, edge_fts: _Array) -> _Array: """Decodes edge diffs.""" e_pred_1 = decoders[0](h_t) e_pred_2 = decoders[1](h_t) e_pred_e = decoders[2](edge_fts) preds = jnp.squeeze( jnp.expand_dims(e_pred_1, -1) + jnp.expand_dims(e_pred_2, -2) + e_pred_e, -1, ) return preds def _decode_graph_diffs(decoders, h_t: _Array, graph_fts: _Array) -> _Array: """Decodes graph diffs.""" gr_emb = jnp.max(h_t, axis=-2) g_pred_n = decoders[0](gr_emb) g_pred_g = decoders[1](graph_fts) preds = jnp.squeeze(g_pred_n + g_pred_g, -1) return preds
{ "content_hash": "750151fdb5141f6a7ee3bebeb9fd2b1f", "timestamp": "", "source": "github", "line_count": 367, "max_line_length": 80, "avg_line_length": 31.861035422343324, "alnum_prop": 0.6102796544941418, "repo_name": "deepmind/clrs", "id": "974fec68129030693ad8d713d09dc0a796e070de", "size": "12389", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "clrs/_src/decoders.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "398809" } ], "symlink_target": "" }
from pyjamas import DOM
from pyjamas import Factory
from FocusWidget import FocusWidget
from pyjamas.ui import Event
from __pyjamas__ import console


class ListBox(FocusWidget):
    """A widget wrapping an HTML <select> element.

    Supports single- and multi-select modes; items carry both display text
    and an optional value attribute.
    """

    def __init__(self, **kwargs):
        if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-ListBox"
        self.changeListeners = []
        self.INSERT_AT_END = -1  # sentinel index for appending
        if kwargs.has_key('Element'):
            element = kwargs.pop('Element')
        else:
            element = DOM.createSelect()
        FocusWidget.__init__(self, element, **kwargs)
        self.sinkEvents(Event.ONCHANGE)

    def addChangeListener(self, listener):
        """Register a listener notified when the selection changes."""
        self.changeListeners.append(listener)

    def addItem(self, item, value = None):
        """Append an item (with an optional value) at the end of the list."""
        self.insertItem(item, value, self.INSERT_AT_END)

    def clear(self):
        """Remove every item from the list."""
        h = self.getElement()
        while DOM.getChildCount(h) > 0:
            DOM.removeChild(h, DOM.getChild(h, 0))

    def getItemCount(self):
        """Return the number of items in the list."""
        return DOM.getChildCount(self.getElement())

    def getItemText(self, index):
        """Return the display text of the item at `index`."""
        child = DOM.getChild(self.getElement(), index)
        return DOM.getInnerText(child)

    def getName(self):
        """Return the form 'name' attribute of the underlying <select>."""
        return DOM.getAttribute(self.getElement(), "name")

    def getSelectedIndex(self):
        """ returns the selected item's index on a single-select
            listbox.  returns -1 if no item is selected.
            for multi-select, use repeated calls to isItemSelected.
        """
        return DOM.getIntAttribute(self.getElement(), "selectedIndex")

    def getValue(self, index):
        """Return the value attribute of the item at `index`."""
        self.checkIndex(index)
        option = DOM.getChild(self.getElement(), index)
        return DOM.getAttribute(option, "value")

    def getVisibleItemCount(self):
        """Return the number of items visible at once (the 'size' attr)."""
        return DOM.getIntAttribute(self.getElement(), "size")

    # also callable as insertItem(item, index)
    def insertItem(self, item, value, index=None):
        """Insert `item` (with optional `value`) at `index`.

        When called with two arguments, the second is treated as the index.
        """
        if index is None:
            index = value
            value = None
        DOM.insertListItem(self.getElement(), item, value, index)

    def isItemSelected(self, index):
        """Return True if the item at `index` is currently selected."""
        self.checkIndex(index)
        option = DOM.getChild(self.getElement(), index)
        return DOM.getBooleanAttribute(option, "selected")

    def isMultipleSelect(self):
        """Return True if the list allows multiple selection."""
        return DOM.getBooleanAttribute(self.getElement(), "multiple")

    def onBrowserEvent(self, event):
        # Dispatch "change" events to registered listeners; a listener may
        # be an object with onChange() or a plain callable.
        if DOM.eventGetType(event) == "change":
            for listener in self.changeListeners:
                if hasattr(listener, 'onChange'):
                    listener.onChange(self)
                else:
                    listener(self)
        else:
            FocusWidget.onBrowserEvent(self, event)

    def removeChangeListener(self, listener):
        """Unregister a previously added change listener."""
        self.changeListeners.remove(listener)

    def removeItem(self, idx):
        """Remove the item at index `idx`."""
        child = DOM.getChild(self.getElement(), idx)
        DOM.removeChild(self.getElement(), child)

    def setItemSelected(self, index, selected):
        """Select (truthy `selected`) or deselect the item at `index`."""
        self.checkIndex(index)
        option = DOM.getChild(self.getElement(), index)
        DOM.setIntAttribute(option, "selected", selected and 1 or 0)

    def setMultipleSelect(self, multiple):
        """Enable or disable multiple selection."""
        DOM.setBooleanAttribute(self.getElement(), "multiple", multiple)

    def setName(self, name):
        """Set the form 'name' attribute of the underlying <select>."""
        DOM.setAttribute(self.getElement(), "name", name)

    def setSelectedIndex(self, index):
        """Select exactly the item at `index` (single-select semantics)."""
        DOM.setIntAttribute(self.getElement(), "selectedIndex", index)

    def selectValue(self, value):
        """ selects the ListBox according to a value.
            to select by item, see selectItem.
            # http://code.google.com/p/pyjamas/issues/detail?id=63
        """
        for n in range(self.getItemCount()):
            if self.getValue(n) == value:
                self.setSelectedIndex(n)
                return n
        return None

    def selectItem(self, item):
        """ selects the ListBox according to an item's text
            to select by value, see selectValue.
            # http://code.google.com/p/pyjamas/issues/detail?id=63
        """
        for n in range(self.getItemCount()):
            if self.getItemText(n) == item:
                self.setSelectedIndex(n)
                return n
        return None

    def setItemText(self, index, text):
        """Set the display text of the item at `index` (must not be None)."""
        self.checkIndex(index)
        if text is None:
            console.error("Cannot set an option to have null text")
            return
        DOM.setOptionText(self.getElement(), text, index)

    def setValue(self, index, value):
        """Set the value attribute of the item at `index`."""
        self.checkIndex(index)
        option = DOM.getChild(self.getElement(), index)
        DOM.setAttribute(option, "value", value)

    def setVisibleItemCount(self, visibleItems):
        """Set how many items are visible at once (the 'size' attr)."""
        DOM.setIntAttribute(self.getElement(), "size", visibleItems)

    def checkIndex(self, index):
        # NOTE(review): out-of-range indices are silently ignored (the GWT
        # original throws IndexOutOfBoundsException) -- kept as-is.
        elem = self.getElement()
        if (index < 0) or (index >= DOM.getChildCount(elem)):
            #throw new IndexOutOfBoundsException();
            pass

    def getSelectedItemText(self, ignore_first_value = False):
        """Return the display texts of all selected items.

        `ignore_first_value` skips index 0 (e.g. a placeholder entry).
        """
        selected = []
        if ignore_first_value:
            start_idx = 1
        else:
            start_idx = 0
        for i in range(start_idx,self.getItemCount()):
            if self.isItemSelected(i):
                selected.append(self.getItemText(i))
        return selected

    def getSelectedValues(self, ignore_first_value = False):
        """Return the value attributes of all selected items.

        `ignore_first_value` skips index 0 (e.g. a placeholder entry).
        """
        selected = []
        if ignore_first_value:
            start_idx = 1
        else:
            start_idx = 0
        for i in range(start_idx,self.getItemCount()):
            if self.isItemSelected(i):
                selected.append(self.getValue(i))
        return selected

    def setItemTextSelection(self, values):
        """Select exactly the items whose display text is in `values`."""
        if not values:
            values = []
            self.setSelectedIndex(0)
        for i in range(0,self.getItemCount()):
            if self.getItemText(i) in values:
                self.setItemSelected(i, "selected")
            else:
                self.setItemSelected(i, "")

    def setValueSelection(self, values):
        """Select exactly the items whose value attribute is in `values`."""
        if not values:
            values = []
            self.setSelectedIndex(0)
        for i in range(0,self.getItemCount()):
            if self.getValue(i) in values:
                self.setItemSelected(i, "selected")
            else:
                self.setItemSelected(i, "")


Factory.registerClass('pyjamas.ui.ListBox', ListBox)
{ "content_hash": "1bd1a9eb4e47bee8d3a3c95b142563a0", "timestamp": "", "source": "github", "line_count": 190, "max_line_length": 77, "avg_line_length": 33.48421052631579, "alnum_prop": 0.6038981452373468, "repo_name": "andreyvit/pyjamas", "id": "99d5931f2268888919864a9add4b172f863f1baa", "size": "7021", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "library/pyjamas/ui/ListBox.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "400333" }, { "name": "PHP", "bytes": "121841" }, { "name": "Python", "bytes": "3726391" }, { "name": "Shell", "bytes": "11256" } ], "symlink_target": "" }
"""Test configs for static_rnn_with_control_flow_v2.""" import tensorflow as tf from tensorflow.lite.testing.zip_test_utils import create_tensor_data from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests from tensorflow.lite.testing.zip_test_utils import register_make_test_function from tensorflow.python.framework import test_util from tensorflow.python.ops import rnn @register_make_test_function("make_static_rnn_with_control_flow_v2_tests") @test_util.enable_control_flow_v2 def make_static_rnn_with_control_flow_v2_tests(options): """Make a set of tests to do basic Lstm cell.""" test_parameters = [ { "dtype": [tf.float32], "num_batches": [4], "time_step_size": [4], "input_vec_size": [3], "num_cells": [4], "use_sequence_length": [True, False], }, ] def build_graph(parameters): """Build a simple graph with BasicLSTMCell.""" num_batches = parameters["num_batches"] time_step_size = parameters["time_step_size"] input_vec_size = parameters["input_vec_size"] num_cells = parameters["num_cells"] inputs_after_split = [] for i in range(time_step_size): one_timestamp_input = tf.compat.v1.placeholder( dtype=parameters["dtype"], name="split_{}".format(i), shape=[num_batches, input_vec_size]) inputs_after_split.append(one_timestamp_input) lstm_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell( num_cells, activation=tf.nn.relu, state_is_tuple=True) sequence_length = None if parameters["use_sequence_length"]: # Using different sequence length in each bach, like [1, 2, 3, 3...]. 
sequence_length = [ min(i + 1, time_step_size) for i in range(num_batches) ] cell_outputs, _ = rnn.static_rnn( lstm_cell, inputs_after_split, dtype=tf.float32, sequence_length=sequence_length) out = cell_outputs[-1] return inputs_after_split, [out] def build_inputs(parameters, sess, inputs, outputs): """Feed inputs, assign variables, and freeze graph.""" with tf.compat.v1.variable_scope("", reuse=True): kernel = tf.compat.v1.get_variable("rnn/basic_lstm_cell/kernel") bias = tf.compat.v1.get_variable("rnn/basic_lstm_cell/bias") kernel_values = create_tensor_data(parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1) bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0, 1) sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values))) num_batches = parameters["num_batches"] time_step_size = parameters["time_step_size"] input_vec_size = parameters["input_vec_size"] input_values = [] for _ in range(time_step_size): tensor_data = create_tensor_data(parameters["dtype"], [num_batches, input_vec_size], 0, 1) input_values.append(tensor_data) out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values))) return input_values, out make_zip_of_tests( options, test_parameters, build_graph, build_inputs, use_frozen_graph=True)
{ "content_hash": "174867109c3a747cf6d71c9f7b1ac068", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 80, "avg_line_length": 38.348837209302324, "alnum_prop": 0.6224984839296543, "repo_name": "Intel-tensorflow/tensorflow", "id": "163a655f1999c042c1aad0be4014d110fbb5a137", "size": "3987", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "tensorflow/lite/testing/op_tests/static_rnn_with_control_flow_v2.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "36962" }, { "name": "C", "bytes": "1400913" }, { "name": "C#", "bytes": "13584" }, { "name": "C++", "bytes": "126099634" }, { "name": "CMake", "bytes": "182430" }, { "name": "Cython", "bytes": "5003" }, { "name": "Dockerfile", "bytes": "416133" }, { "name": "Go", "bytes": "2129888" }, { "name": "HTML", "bytes": "4686483" }, { "name": "Java", "bytes": "1074438" }, { "name": "Jupyter Notebook", "bytes": "792906" }, { "name": "LLVM", "bytes": "6536" }, { "name": "MLIR", "bytes": "11447433" }, { "name": "Makefile", "bytes": "2760" }, { "name": "Objective-C", "bytes": "172666" }, { "name": "Objective-C++", "bytes": "300213" }, { "name": "Pawn", "bytes": "5552" }, { "name": "Perl", "bytes": "7536" }, { "name": "Python", "bytes": "42782002" }, { "name": "Roff", "bytes": "5034" }, { "name": "Ruby", "bytes": "9199" }, { "name": "Shell", "bytes": "621917" }, { "name": "Smarty", "bytes": "89538" }, { "name": "SourcePawn", "bytes": "14625" }, { "name": "Starlark", "bytes": "7738020" }, { "name": "Swift", "bytes": "78435" }, { "name": "Vim Snippet", "bytes": "58" } ], "symlink_target": "" }
import logging
import msgpack
import inspect
import os
import sys
import time
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator

import util
import util.config
import plugins
from daemon import Daemon

# Root logger; configured later by util.init_logging() in __main__.
logger = logging.getLogger()


class Server(Daemon):
    """CurveZMQ ("Ironhouse") REP server daemon.

    Receives msgpack-encoded messages from authenticated clients and
    dispatches them to plugin decode functions keyed by plugin name.
    NOTE: this is Python 2 code (print statements in __main__).
    """

    def load_plugins(self):
        """Build the name -> decode-callable map from the plugins module.

        Any class in the plugins module whose name contains 'Plugin' is
        registered under its class-level `_name` attribute.
        """
        self.plugs = {}
        for name, obj in inspect.getmembers(plugins):
            if 'Plugin' in name and inspect.isclass(obj):
                self.plugs[obj._name] = obj.decode
        logger.info("%s", self.plugs)

    def handle_msg(self, msg):
        """Unpack one msgpack message and route it to the matching plugin.

        Raises KeyError if the message's 'name' has no registered plugin.
        """
        logger.info("Handling Message")
        raw = msgpack.unpackb(msg)
        # NOTE(review): 'Recieved' typo is in a runtime log string; left as-is.
        logger.info('Recieved %s msg', raw['name'])
        self.plugs[raw['name']](raw)
        #logger.info("Got Data %s", msgpack.unpackb(raw['data']))

    def run(self):
        ''' Run Ironhouse example '''
        # These directories are generated by the generate_certificates script
        keys_dir = self.config['certs']['certs']
        public_keys_dir = self.config['certs']['public']
        secret_keys_dir = self.config['certs']['private']

        # Refuse to start without the full certificate directory layout.
        if not (util.check_dir(keys_dir) and util.check_dir(public_keys_dir) and util.check_dir(secret_keys_dir)):
            logging.critical("Certificates are missing - run generate_certificates.py script first")
            sys.exit(1)
        logger.info("Keys: %s | Public: %s | Secret: %s", keys_dir, public_keys_dir, secret_keys_dir)

        ctx = zmq.Context.instance()

        # Start an authenticator for this context.
        auth = ThreadAuthenticator(ctx)
        auth.start()
        # Only IPs listed in the config are allowed to connect at all.
        for ip in self.config['server']['auth']:
            auth.allow(ip)
        # Tell authenticator to use the certificate in a directory
        auth.configure_curve(domain='*', location=public_keys_dir)

        server = ctx.socket(zmq.REP)

        # Load this server's CURVE keypair from its secret certificate file.
        server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
        server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
        server.curve_secretkey = server_secret
        server.curve_publickey = server_public

        server.curve_server = True  # must come before bind
        bind_info = 'tcp://%s:%s' % (self.config['server']['listen'], self.config['server']['port'])
        server.bind(bind_info)
        logger.info("Server bound to: %s", bind_info)

        self.load_plugins()

        # REQ/REP lockstep: every recv must be answered before the next recv.
        logger.info("Starting reciever.")
        while True:
            msg = server.recv()
            self.handle_msg(msg)
            server.send("ack")

        # NOTE(review): unreachable — the loop above never exits normally.
        auth.stop()


if __name__ == '__main__':
    # CURVE security requires libzmq >= 4.0.
    if zmq.zmq_version_info() < (4,0):
        raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version()))

    config = util.load_yaml_file(util.config.SERVER["config"])
    util.init_logging(**config['logging'])

    daemon = Server(config['pid_file'], config_file=util.config.SERVER["config"])
    logger.info("Started Server")
    # NOTE(review): run() blocks forever, so the start/stop/restart dispatch
    # below is never reached — presumably run() was meant to be invoked only
    # via daemon.start(); confirm against the Daemon base class.
    daemon.run()
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart" % sys.argv[0]
        sys.exit(2)
{ "content_hash": "f6ab45ccfe0dc330b35fd25fb9becc15", "timestamp": "", "source": "github", "line_count": 102, "max_line_length": 125, "avg_line_length": 33.27450980392157, "alnum_prop": 0.6069534472598703, "repo_name": "Darthone/atto", "id": "5b0d265030173399119289ace0e1922491980897", "size": "3417", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "atto/bin/server.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "7256" }, { "name": "JavaScript", "bytes": "15384" }, { "name": "Python", "bytes": "22648" }, { "name": "Shell", "bytes": "325" } ], "symlink_target": "" }
"""URL endpoint to allow bisect bots to post results to the dashboard."""

import json
import logging

from google.appengine.api import app_identity
from google.appengine.ext import ndb

from dashboard import post_data_handler
from dashboard import update_bug_with_results
from dashboard.common import datastore_hooks
from dashboard.common import utils
from dashboard.models import try_job

# Schema passed to utils.Validate: 'status' is required and must be one of
# the listed values.
_EXPECTED_RESULT_PROPERTIES = {
    'status': ['pending', 'started', 'completed', 'failed', 'aborted'],
}


class BadRequestError(Exception):
  """An error indicating that a 400 response status should be returned."""
  pass


class PostBisectResultsHandler(post_data_handler.PostDataHandler):

  def post(self):
    """Validates data parameter and saves to TryJob entity.

    Bisect results come from a "data" parameter, which is a JSON encoding of
    a dictionary.

    The required fields are "master", "bot", "test".

    Request parameters:
      data: JSON encoding of a dictionary.

    Outputs:
      Empty 200 response with if successful,
      200 response with warning message if optional data is invalid,
      403 response with error message if sender IP is not white-listed,
      400 response with error message if required data is invalid.
      500 with error message otherwise.
    """
    # Elevate datastore access for this request; bots are trusted by IP below.
    datastore_hooks.SetPrivilegedRequest()
    if not self._CheckIpAgainstWhitelist():
      # _CheckIpAgainstWhitelist writes the 403 response itself.
      return

    data = self.request.get('data')
    if not data:
      self.ReportError('Missing "data" parameter.', status=400)
      return

    logging.info('Received data: %s', data)

    try:
      data = json.loads(self.request.get('data'))
    except ValueError:
      self.ReportError('Invalid JSON string.', status=400)
      return

    try:
      _ValidateResultsData(data)
      job = _GetTryJob(data)
      if not job:
        # Missing job is only a warning (200), not an error.
        self.ReportWarning('No try job found.')
        return
      _UpdateTryJob(job, data)
      # in_progress=True: the bug's quick log shows the job as still running.
      update_bug_with_results.UpdateQuickLog(job, in_progress=True)
    except BadRequestError as error:
      self.ReportError(error.message, status=400)


def _ValidateResultsData(results_data):
  """Checks results_data against the expected schema; raises on mismatch."""
  utils.Validate(_EXPECTED_RESULT_PROPERTIES, results_data)
  # TODO(chrisphan): Validate other values.
def _UpdateTryJob(job, results_data): if not job.results_data: job.results_data = {} job.results_data.update(results_data) job.results_data['issue_url'] = (job.results_data.get('issue_url') or _IssueURL(job)) job.put() def _GetTryJob(results_data): try_job_id = results_data.get('try_job_id') if not try_job_id: return None job = ndb.Key(try_job.TryJob, try_job_id).get() return job def _IssueURL(job): """Returns a URL for information about a bisect try job.""" hostname = app_identity.get_default_version_hostname() job_id = job.buildbucket_job_id return 'https://%s/buildbucket_job_status/%s' % (hostname, job_id)
{ "content_hash": "299110cc3f3e8d3835f56785a35c3b5a", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 78, "avg_line_length": 29.02, "alnum_prop": 0.6946933149552033, "repo_name": "catapult-project/catapult-csm", "id": "9154747816caa1647df703d20e2802120a2c6f86", "size": "3065", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "dashboard/dashboard/post_bisect_results.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "4902" }, { "name": "C++", "bytes": "43728" }, { "name": "CSS", "bytes": "24873" }, { "name": "Go", "bytes": "80325" }, { "name": "HTML", "bytes": "11817766" }, { "name": "JavaScript", "bytes": "518002" }, { "name": "Makefile", "bytes": "1588" }, { "name": "Python", "bytes": "6207634" }, { "name": "Shell", "bytes": "2558" } ], "symlink_target": "" }
"""Generate KFDef YAML from kustomize packages.

This is a helper tool aimed at generating the RAW Yaml for KFDef specs
into kubeflow/manifests.

We use kustomize to make it easier to generate KFDef YAML files corresponding
to different KF versions but we don't want users to be exposed to that.
"""
import logging
import os
import subprocess
import tempfile

import fire
import yaml

# File-name prefix kustomize emits for generated KFDef resources; it is
# stripped to recover the spec's base name.
RESOURCE_PREFIX = "kfdef.apps.kubeflow.org_v1_kfdef_"


class KFDefBuilder:
    """CLI entry point that renders kfdef/source/<version> packages to YAML."""

    @staticmethod
    def run():
        """Render every version package under kfdef/source into kfdef/.

        For each version directory, runs `kustomize build` into a temp
        directory, strips the resource prefix from each output file, renames
        it to the kfctl naming convention, removes metadata.name, and writes
        the result into the kfdef directory.
        """
        root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
        kfdef_dir = os.path.join(root, "kfdef")
        source_dir = os.path.join(root, "kfdef", "source")

        # Walk over all versions
        for base, dirs, _ in os.walk(source_dir):
            for version in dirs:
                package_dir = os.path.join(base, version)

                # Create a temporary directory to write all the kustomize
                # output to
                temp_dir = tempfile.mkdtemp()
                subprocess.check_call(
                    ["kustomize", "build", package_dir, "-o", temp_dir])

                for f in os.listdir(temp_dir):
                    new_name = f[len(RESOURCE_PREFIX):]

                    # To preserve the existing pattern for now master files
                    # are just named kfctl_?.Yaml
                    # whereas version files are named kfctl_?.version.yaml
                    # in subsequent PRs we might change that
                    if version == "master":
                        ext = ".yaml"
                    else:
                        ext = "." + version + ".yaml"

                    basename, _ = os.path.splitext(new_name)
                    new_name = basename + ext
                    new_file = os.path.join(kfdef_dir,
                                            new_name.replace("-", "_"))

                    logging.info(f"Processing file: {f} -> {new_file}")

                    with open(os.path.join(temp_dir, f)) as hf:
                        # BUGFIX: yaml.load without an explicit Loader is
                        # deprecated (PyYAML >= 5.1) and uses an unsafe
                        # default loader; safe_load is sufficient for
                        # kustomize output.
                        spec = yaml.safe_load(hf)

                    # Remove the name. Kustomize requires a name but we don't
                    # want a name so that kfctl will fill it in based on the
                    # app directory
                    del spec["metadata"]["name"]

                    with open(new_file, "w") as hf:
                        yaml.safe_dump(spec, hf, default_flow_style=False)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format=('%(levelname)s|%(asctime)s'
                                '|%(message)s|%(pathname)s|%(lineno)d|'),
                        datefmt='%Y-%m-%dT%H:%M:%S',
                        )
    fire.Fire(KFDefBuilder)
{ "content_hash": "8a44a48263ab90e79e94d1890fa60ec2", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 77, "avg_line_length": 31.813333333333333, "alnum_prop": 0.5888516345347863, "repo_name": "kubeflow/code-intelligence", "id": "c47da6a1f56f5fb109191b97a409bf7d9224b0da", "size": "2386", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kubeflow_clusters/code-intelligence/upstream/manifests/hack/build_kfdef_specs.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "150684" }, { "name": "Dockerfile", "bytes": "4745" }, { "name": "Go", "bytes": "74600" }, { "name": "HTML", "bytes": "4237" }, { "name": "JavaScript", "bytes": "75524" }, { "name": "Jinja", "bytes": "21547" }, { "name": "Jupyter Notebook", "bytes": "1876429" }, { "name": "Less", "bytes": "262592" }, { "name": "Makefile", "bytes": "14819" }, { "name": "Python", "bytes": "189927" }, { "name": "SCSS", "bytes": "289359" }, { "name": "Shell", "bytes": "22273" }, { "name": "Smarty", "bytes": "113" } ], "symlink_target": "" }
import os
import sys
import argparse
import subprocess as proc
import hashlib

# NOTE: this is a Python 2 script (print statements near the end).
parser = argparse.ArgumentParser(description="Launches multiple DotsBoxes instances")
parser.add_argument("ports", metavar="PORT", type=int, nargs="+", help="TCP ports to use")
parser.add_argument("--keep-configs", dest="keepconf", action="store_true", default=False, help="Do not generate new configs - use existing ones")
parser.add_argument("--generate-only", dest="generate_only", action="store_true", default=False, help="Generate config files without processes run")

# Repository layout: this script lives in <root>/scripts, classes in <root>/bin.
script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.normpath( os.path.join( script_dir, ".." ) )
classpath = os.path.join(root_dir, "bin")

args = parser.parse_args()
ports = [ str(port) for port in args.ports ]
keepconf = args.keepconf
generate_only = args.generate_only

# The two flags contradict each other: one skips config generation, the
# other does nothing BUT config generation.
if keepconf and generate_only:
    sys.stderr.write("--keep-configs and --generate-only are mutually exclusive\n")
    exit(1)

# Base java command line; per-player port and config file are appended later.
cmd = "java -Dfile.encoding=UTF-8 -classpath {0} dotsboxes.DotsBoxes".format(classpath).split()


class Player:
    """One DotsBoxes instance: identity, endpoint, and its peer list."""

    def __init__(self, name, password, ip, port):
        self.name = name
        self.ip = ip
        self.port = port
        # Credential hash is md5(name + password) hex digest (py2 str input).
        md5 = hashlib.md5( name + password )
        self.hash = md5.hexdigest()
        self.known_players = []

    def __str__(self):
        # One config-file line: "<name> <ip> <port> <hash>".
        return "{0} {1} {2} {3}".format(self.name, self.ip, self.port, self.hash )

    def __eq__(self, other):
        return ( (self.name, self.ip, self.port, self.hash) == (other.name, other.ip, other.port, other.hash) )

    def connect_to(self, player):
        """Record another player as a known peer of this one."""
        self.known_players.append(player)


class KnownPlayersConfig:
    """Renders a player's peer list as the newline-separated config format."""

    def __init__(self, players):
        self.players = players

    def __str__(self):
        players_str = [ str(player) for player in self.players ]
        return "\n".join(players_str)


# One player per requested port, all on localhost.
players = [ Player( "Tester" + str(port), "qwerty", "127.0.0.1", port ) for port in ports ]

# all to all connection
for player in players:
    for other in players:
        if other != player:
            player.connect_to(other)

processes = []
for player in players:
    player_cmd = cmd + [player.port]
    conf_filename = "{0}.conf".format(player.port)
    if not keepconf:
        # Write <port>.conf listing every other player.
        conf = KnownPlayersConfig(player.known_players)
        with open(conf_filename, "w" ) as file:
            file.write( str(conf) )
    player_cmd += [conf_filename]

    if not generate_only:
        # Each instance logs stdout+stderr to <name>.log.
        output_file = open( "{0}.log".format(player.name), "w")
        processes.append( proc.Popen(player_cmd, stdout=output_file, stderr=output_file) )

# Sum of exit codes: zero only if every instance exited cleanly.
retcode = 0
for process in processes:
    retcode += process.wait()

if retcode != 0:
    print "FAILED"
    exit(retcode)
{ "content_hash": "d1442bb48c70ef6b5e534b1ae5bd3b15", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 148, "avg_line_length": 30.685393258426966, "alnum_prop": 0.6532405712193335, "repo_name": "asavonic/DotsBoxes", "id": "58435c8f09951ef82e5bfe5bf23d661391f0733c", "size": "2731", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/run_multiple_clients.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "103661" }, { "name": "Python", "bytes": "2731" } ], "symlink_target": "" }
# One-off data-fixup script (Python 2: print statement below).
# Loads pickled Region objects, normalizes their names, and writes them back.
from anu import Region
import pickle

regions = []
with open("regions.pickle", "rb") as f:
    regions = pickle.load(f)

for r in regions:
    # NOTE(review): "\'" is identical to "'" in Python, so this replace is a
    # no-op — was an SQL-style escape "\\'" intended? Confirm before changing.
    r.Name = r.Name.replace("'", "\'")
    # Strip the Indonesian regency prefix ("KAB." / "KAB ") from names.
    r.Name = r.Name.replace("KAB. ", "")
    r.Name = r.Name.replace("KAB ", "")

print len(regions)

# Overwrite the pickle in place with the cleaned records.
with open("regions.pickle", "wb") as f:
    pickle.dump(regions, f)
{ "content_hash": "dfac7172beca5dae409f5e30976615c8", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 40, "avg_line_length": 23.133333333333333, "alnum_prop": 0.6080691642651297, "repo_name": "ekospinach/kawaldesa", "id": "d6e5b0bf7a748ff13495d25844580a11c332e67c", "size": "347", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "SQLs/desa/fix-regions.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "94" }, { "name": "Batchfile", "bytes": "7316" }, { "name": "C#", "bytes": "369341" }, { "name": "CSS", "bytes": "8629" }, { "name": "JavaScript", "bytes": "152561" }, { "name": "Makefile", "bytes": "7422" }, { "name": "PLSQL", "bytes": "3072" }, { "name": "Python", "bytes": "16696" }, { "name": "TypeScript", "bytes": "170354" } ], "symlink_target": "" }
"""Django admin configuration for the competition app.

Registers inline and top-level ModelAdmins for competitions, games, scores,
organizers, registrations, teams, and invitations.
"""
from functools import reduce  # builtin in py2; explicit import for py3 compat

from django.contrib import admin
from django.db.models import Q

from competition.models.competition_model import Competition
from competition.models.game_model import Game, GameScore
from competition.models.organizer_model import Organizer
from competition.models.organizer_model import OrganizerRole
from competition.models.registration_model import Registration
from competition.models.registration_model import RegistrationQuestion
from competition.models.registration_model import RegistrationQuestionChoice
from competition.models.registration_model import RegistrationQuestionResponse
from competition.models.team_model import Team
from competition.models.invitation_model import Invitation

##############################################################################
#
#   Inline Admins
#
##############################################################################


class InlineTeamAdmin(admin.TabularInline):
    model = Team
    extra = 0
    max_num = 0
    fields = ('created', 'name', 'paid', 'time_paid', 'eligible_to_win')
    # BUGFIX: was ('created') — a plain string, not a 1-tuple. Django iterates
    # readonly_fields, so the string was treated as the fields
    # 'c','r','e',... The trailing comma makes it a tuple.
    readonly_fields = ('created',)
    ordering = ('created',)


class InlineGameAdmin(admin.TabularInline):
    model = Game
    extra = 0
    fields = ('id', 'start_time', 'end_time')
    readonly_fields = ('id',)


class InlineGameScoreAdmin(admin.TabularInline):
    model = GameScore
    fields = ('team', 'score', 'extra_data')
    list_filter = ('team',)
    raw_id_fields = ('game',)


class InlineOrganizerAdmin(admin.TabularInline):
    model = Organizer
    extra = 0
    fields = ('user', 'role')
    filter_horizontal = ('role',)


class InlineRegistrationAdmin(admin.TabularInline):
    model = Registration
    fields = ('user', 'signup_date',)
    readonly_fields = ('user', 'signup_date',)


class InlineRegistrationQuestionAdmin(admin.StackedInline):
    model = RegistrationQuestion


class InlineRegistrationQuestionChoiceAdmin(admin.StackedInline):
    model = RegistrationQuestionChoice


class InlineResponseAdmin(admin.TabularInline):
    """Base inline that limits responses to particular question types.

    Subclasses set supported_question_types to the type codes they render.
    """
    extra = 0
    max_num = 0
    supported_question_types = ('SA', 'SC', 'MC', 'AB')

    def queryset(self, request):
        qs = super(InlineResponseAdmin, self).queryset(request)
        # OR together one Q per supported type code.
        supported = self.supported_question_types
        queries = [Q(question__question_type=t) for t in supported]
        query = reduce(lambda x, y: x | y, queries)
        return qs.filter(query)


class InlineShortAnswerResponseAdmin(InlineResponseAdmin):
    model = RegistrationQuestionResponse
    fields = ('question', 'text_response')
    readonly_fields = ('question', 'text_response')
    supported_question_types = ('SA',)


class InlineMultipleChoiceResponseAdmin(InlineResponseAdmin):
    model = RegistrationQuestionResponse
    fields = ('question', 'choices')
    readonly_fields = ('question', 'choices')
    supported_question_types = ('SC', 'MC')


class InlineAgreementResponseAdmin(InlineResponseAdmin):
    model = RegistrationQuestionResponse
    fields = ('question', 'agreed')
    readonly_fields = ('question', 'agreed')
    supported_question_types = ('AB',)

##############################################################################
#
#   Model Admins
#
##############################################################################


class CompetitionAdmin(admin.ModelAdmin):
    filter_horizontal = ('questions',)
    list_display = ('name', 'is_open', 'is_running', 'start_time', 'end_time')
    list_filter = ('is_open', 'is_running', 'start_time', 'end_time')
    prepopulated_fields = {"slug": ("name",)}
    inlines = (InlineOrganizerAdmin,)


class GameAdmin(admin.ModelAdmin):
    inlines = (InlineGameScoreAdmin,)
    list_display = ('pk', 'competition', 'game_id')
    list_filter = ('competition',)


class GameScoreAdmin(admin.ModelAdmin):
    # NOTE(review): defined but never passed to admin.site.register below —
    # confirm whether GameScore should be registered with this admin.
    list_display = ('pk', 'game', 'team', 'score')
    list_filter = ('team',)
    raw_id_fields = ('game',)


class OrganizerRoleAdmin(admin.ModelAdmin):
    list_display = ('name', 'description')


class OrganizerAdmin(admin.ModelAdmin):
    list_display = ('competition', 'user')
    filter_horizontal = ('role',)


class RegistrationAdmin(admin.ModelAdmin):
    inlines = (InlineShortAnswerResponseAdmin,
               InlineMultipleChoiceResponseAdmin,
               InlineAgreementResponseAdmin)
    list_display = ('user', 'competition', 'signup_date', 'active')
    list_filter = ('signup_date', 'active')


class RegistrationQuestionAdmin(admin.ModelAdmin):
    inlines = (InlineRegistrationQuestionChoiceAdmin,)
    list_display = ('question_type', 'question')
    list_filter = ('question_type',)


class TeamAdmin(admin.ModelAdmin):
    filter_horizontal = ('members',)
    list_display = ('name', 'competition', 'created',
                    'eligible_to_win', 'paid')
    list_filter = ('competition', 'paid', 'created')
    prepopulated_fields = {"slug": ("name",)}


class InvitationAdmin(admin.ModelAdmin):
    list_display = ('receiver', 'sender', 'team', 'sent', 'read')
    list_filter = ('sent', 'read')


admin.site.register(Competition, CompetitionAdmin)
admin.site.register(Game, GameAdmin)
admin.site.register(OrganizerRole, OrganizerRoleAdmin)
admin.site.register(Organizer, OrganizerAdmin)
admin.site.register(Registration, RegistrationAdmin)
admin.site.register(RegistrationQuestion, RegistrationQuestionAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(Invitation, InvitationAdmin)
{ "content_hash": "e3d362b4d51a72a78752686f11c6c29e", "timestamp": "", "source": "github", "line_count": 170, "max_line_length": 78, "avg_line_length": 31.929411764705883, "alnum_prop": 0.6643330876934415, "repo_name": "michaelwisely/django-competition", "id": "2a80d24e493b5804db3add3fa23d0120bcfd5071", "size": "5428", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "src/competition/admin.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "3975" }, { "name": "Cucumber", "bytes": "1592" }, { "name": "HTML", "bytes": "41099" }, { "name": "JavaScript", "bytes": "2111" }, { "name": "Makefile", "bytes": "782" }, { "name": "Python", "bytes": "290534" } ], "symlink_target": "" }
# ZenPack bootstrap: load the declarative zenpack.yaml that lives next to
# this file and expose the generated schema module.
import os

from ZenPacks.zenoss.ZenPackLib import zenpacklib

# level=30 corresponds to logging.WARNING, silencing zenpacklib info output.
CFG = zenpacklib.load_yaml([os.path.join(os.path.dirname(__file__), "zenpack.yaml")], verbose=False, level=30)

# Generated device/component classes are accessed as schema.<ClassName>.
schema = CFG.zenpack_module.schema
{ "content_hash": "6ae4ba1413b25658f99f8999cce84d3e", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 110, "avg_line_length": 41.4, "alnum_prop": 0.7632850241545893, "repo_name": "N-faycal/ZenPacks.iXsystems.TrueNAS", "id": "a1cb83c088e7e6b5a4deed71d8f9425cfd47b6b2", "size": "207", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "ZenPacks/iXsystems/TrueNAS/__init__.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "JavaScript", "bytes": "994" }, { "name": "Python", "bytes": "18182" } ], "symlink_target": "" }
import json import logging from django.utils.text import normalize_newlines from django.utils.translation import ugettext_lazy as _ from django.views.decorators.debug import sensitive_variables from horizon import exceptions from horizon import forms from horizon.utils import validators from horizon import workflows from openstack_dashboard import api from openstack_dashboard.api import cinder from ...images_and_snapshots.utils import get_available_images LOG = logging.getLogger(__name__) class SelectProjectUserAction(workflows.Action): project_id = forms.ChoiceField(label=_("Project")) user_id = forms.ChoiceField(label=_("User")) def __init__(self, request, *args, **kwargs): super(SelectProjectUserAction, self).__init__(request, *args, **kwargs) # Set our project choices projects = [(tenant.id, tenant.name) for tenant in request.user.authorized_tenants] self.fields['project_id'].choices = projects # Set our user options users = [(request.user.id, request.user.username)] self.fields['user_id'].choices = users class Meta: name = _("Project & User") # Unusable permission so this is always hidden. However, we # keep this step in the workflow for validation/verification purposes. permissions = ("!",) class SelectProjectUser(workflows.Step): action_class = SelectProjectUserAction contributes = ("project_id", "user_id") class VolumeOptionsAction(workflows.Action): VOLUME_CHOICES = ( ('', _("Don't boot from a volume.")), ("volume_id", _("Boot from volume.")), ("volume_snapshot_id", _("Boot from volume snapshot " "(creates a new volume).")), ) # Boot from volume options volume_type = forms.ChoiceField(label=_("Volume Options"), choices=VOLUME_CHOICES, required=False) volume_id = forms.ChoiceField(label=_("Volume"), required=False) volume_snapshot_id = forms.ChoiceField(label=_("Volume Snapshot"), required=False) device_name = forms.CharField(label=_("Device Name"), required=False, initial="vda", help_text=_("Volume mount point (e.g. 
'vda' " "mounts at '/dev/vda').")) delete_on_terminate = forms.BooleanField(label=_("Delete on Terminate"), initial=False, required=False, help_text=_("Delete volume on " "instance terminate")) class Meta: name = _("Volume Options") permissions = ('openstack.services.volume',) help_text_template = ("project/instances/" "_launch_volumes_help.html") def clean(self): cleaned_data = super(VolumeOptionsAction, self).clean() volume_opt = cleaned_data.get('volume_type', None) if volume_opt and not cleaned_data[volume_opt]: raise forms.ValidationError(_('Please choose a volume, or select ' '%s.') % self.VOLUME_CHOICES[0][1]) return cleaned_data def _get_volume_display_name(self, volume): if hasattr(volume, "volume_id"): vol_type = "snap" visible_label = _("Snapshot") else: vol_type = "vol" visible_label = _("Volume") return (("%s:%s" % (volume.id, vol_type)), (_("%(name)s - %(size)s GB (%(label)s)") % {'name': volume.display_name, 'size': volume.size, 'label': visible_label})) def populate_volume_id_choices(self, request, context): volume_options = [("", _("Select Volume"))] try: volumes = [v for v in cinder.volume_list(self.request) if v.status == api.cinder.VOLUME_STATE_AVAILABLE] volume_options.extend([self._get_volume_display_name(vol) for vol in volumes]) except: exceptions.handle(self.request, _('Unable to retrieve list of volumes.')) return volume_options def populate_volume_snapshot_id_choices(self, request, context): volume_options = [("", _("Select Volume Snapshot"))] try: snapshots = cinder.volume_snapshot_list(self.request) snapshots = [s for s in snapshots if s.status == api.cinder.VOLUME_STATE_AVAILABLE] volume_options.extend([self._get_volume_display_name(snap) for snap in snapshots]) except: exceptions.handle(self.request, _('Unable to retrieve list of volume ' 'snapshots.')) return volume_options class VolumeOptions(workflows.Step): action_class = VolumeOptionsAction depends_on = ("project_id", "user_id") contributes = ("volume_type", "volume_id", 
"device_name", # Can be None for an image. "delete_on_terminate") def contribute(self, data, context): context = super(VolumeOptions, self).contribute(data, context) # Translate form input to context for volume values. if "volume_type" in data and data["volume_type"]: context['volume_id'] = data.get(data['volume_type'], None) if not context.get("volume_type", ""): context['volume_type'] = self.action.VOLUME_CHOICES[0][0] context['volume_id'] = None context['device_name'] = None context['delete_on_terminate'] = None return context class SetInstanceDetailsAction(workflows.Action): SOURCE_TYPE_CHOICES = ( ("image_id", _("Image")), ("instance_snapshot_id", _("Snapshot")), ) source_type = forms.ChoiceField(label=_("Instance Source"), choices=SOURCE_TYPE_CHOICES) image_id = forms.ChoiceField(label=_("Image"), required=False) instance_snapshot_id = forms.ChoiceField(label=_("Instance Snapshot"), required=False) availability_zone = forms.ChoiceField(label=_("Availability Zone"), required=False) name = forms.CharField(max_length=80, label=_("Instance Name")) flavor = forms.ChoiceField(label=_("Flavor"), help_text=_("Size of image to launch.")) count = forms.IntegerField(label=_("Instance Count"), min_value=1, initial=1, help_text=_("Number of instances to launch.")) class Meta: name = _("Details") help_text_template = ("project/instances/" "_launch_details_help.html") def clean(self): cleaned_data = super(SetInstanceDetailsAction, self).clean() # Validate our instance source. source = cleaned_data['source_type'] # There should always be at least one image_id choice, telling the user # that there are "No Images Available" so we check for 2 here... volume_type = self.data.get('volume_type', None) if volume_type: # Boot from volume if cleaned_data[source]: raise forms.ValidationError(_("You can't select an instance " "source when booting from a " "Volume. 
The Volume is your " "source and should contain " "the operating system.")) else: # Boot from image / image_snapshot if source == 'image_id' and not \ filter(lambda x: x[0] != '', self.fields['image_id'].choices): raise forms.ValidationError(_("There are no image sources " "available; you must first " "create an image before " "attemtping to launch an " "instance.")) elif not cleaned_data[source]: raise forms.ValidationError(_("Please select an option for the" " instance source.")) # Prevent launching multiple instances with the same volume. # TODO(gabriel): is it safe to launch multiple instances with # a snapshot since it should be cloned to new volumes? count = cleaned_data.get('count', 1) if volume_type and count > 1: msg = _('Launching multiple instances is only supported for ' 'images and instance snapshots.') raise forms.ValidationError(msg) return cleaned_data def _init_images_cache(self): if not hasattr(self, '_images_cache'): self._images_cache = {} def populate_image_id_choices(self, request, context): self._init_images_cache() images = get_available_images(request, context.get('project_id'), self._images_cache) choices = [(image.id, image.name) for image in images if image.properties.get("image_type", '') != "snapshot"] if choices: choices.insert(0, ("", _("Select Image"))) else: choices.insert(0, ("", _("No images available."))) return choices def populate_instance_snapshot_id_choices(self, request, context): self._init_images_cache() images = get_available_images(request, context.get('project_id'), self._images_cache) choices = [(image.id, image.name) for image in images if image.properties.get("image_type", '') == "snapshot"] if choices: choices.insert(0, ("", _("Select Instance Snapshot"))) else: choices.insert(0, ("", _("No snapshots available."))) return choices def populate_flavor_choices(self, request, context): try: flavors = api.nova.flavor_list(request) flavor_list = [(flavor.id, "%s" % flavor.name) for flavor in flavors] except: flavor_list 
= [] exceptions.handle(request, _('Unable to retrieve instance flavors.')) return sorted(flavor_list) def populate_availability_zone_choices(self, request, context): try: zones = api.nova.availability_zone_list(request) except: zones = [] exceptions.handle(request, _('Unable to retrieve availability zones.')) zone_list = [(zone.zoneName, zone.zoneName) for zone in zones if zone.zoneState['available']] zone_list.sort() if zone_list: zone_list.insert(0, ("", _("Any Availability Zone"))) else: zone_list.insert(0, ("", _("No availability zones found."))) return zone_list def get_help_text(self): extra = {} try: extra['usages'] = api.nova.tenant_absolute_limits(self.request) extra['usages_json'] = json.dumps(extra['usages']) flavors = json.dumps([f._info for f in api.nova.flavor_list(self.request)]) extra['flavors'] = flavors except: exceptions.handle(self.request, _("Unable to retrieve quota information.")) return super(SetInstanceDetailsAction, self).get_help_text(extra) class SetInstanceDetails(workflows.Step): action_class = SetInstanceDetailsAction contributes = ("source_type", "source_id", "availability_zone", "name", "count", "flavor") def prepare_action_context(self, request, context): if 'source_type' in context and 'source_id' in context: context[context['source_type']] = context['source_id'] return context def contribute(self, data, context): context = super(SetInstanceDetails, self).contribute(data, context) # Allow setting the source dynamically. if ("source_type" in context and "source_id" in context and context["source_type"] not in context): context[context["source_type"]] = context["source_id"] # Translate form input to context for source values. 
if "source_type" in data: context["source_id"] = data.get(data['source_type'], None) return context KEYPAIR_IMPORT_URL = "horizon:project:access_and_security:keypairs:import" class SetAccessControlsAction(workflows.Action): keypair = forms.DynamicChoiceField(label=_("Keypair"), required=False, help_text=_("Which keypair to use for " "authentication."), add_item_link=KEYPAIR_IMPORT_URL) admin_pass = forms.RegexField( label=_("Admin Pass"), required=False, widget=forms.PasswordInput(render_value=False), regex=validators.password_validator(), error_messages={'invalid': validators.password_validator_msg()}) confirm_admin_pass = forms.CharField( label=_("Confirm Admin Pass"), required=False, widget=forms.PasswordInput(render_value=False)) groups = forms.MultipleChoiceField(label=_("Security Groups"), required=True, initial=["default"], widget=forms.CheckboxSelectMultiple(), help_text=_("Launch instance in these " "security groups.")) class Meta: name = _("Access & Security") help_text = _("Control access to your instance via keypairs, " "security groups, and other mechanisms.") def populate_keypair_choices(self, request, context): try: keypairs = api.nova.keypair_list(request) keypair_list = [(kp.name, kp.name) for kp in keypairs] except: keypair_list = [] exceptions.handle(request, _('Unable to retrieve keypairs.')) if keypair_list: if len(keypair_list) == 1: self.fields['keypair'].initial = keypair_list[0][0] keypair_list.insert(0, ("", _("Select a keypair"))) else: keypair_list = (("", _("No keypairs available.")),) return keypair_list def populate_groups_choices(self, request, context): try: groups = api.nova.security_group_list(request) security_group_list = [(sg.name, sg.name) for sg in groups] except: exceptions.handle(request, _('Unable to retrieve list of security groups')) security_group_list = [] return security_group_list def clean(self): '''Check to make sure password fields match.''' cleaned_data = super(SetAccessControlsAction, self).clean() if 
'admin_pass' in cleaned_data: if cleaned_data['admin_pass'] != cleaned_data.get( 'confirm_admin_pass', None): raise forms.ValidationError(_('Passwords do not match.')) return cleaned_data class SetAccessControls(workflows.Step): action_class = SetAccessControlsAction depends_on = ("project_id", "user_id") contributes = ("keypair_id", "security_group_ids", "admin_pass", "confirm_admin_pass") def contribute(self, data, context): if data: post = self.workflow.request.POST context['security_group_ids'] = post.getlist("groups") context['keypair_id'] = data.get("keypair", "") context['admin_pass'] = data.get("admin_pass", "") context['confirm_admin_pass'] = data.get("confirm_admin_pass", "") return context class CustomizeAction(workflows.Action): customization_script = forms.CharField(widget=forms.Textarea, label=_("Customization Script"), required=False, help_text=_("A script or set of " "commands to be " "executed after the " "instance has been " "built (max 16kb).")) class Meta: name = _("Post-Creation") help_text_template = ("project/instances/" "_launch_customize_help.html") class PostCreationStep(workflows.Step): action_class = CustomizeAction contributes = ("customization_script",) class SetNetworkAction(workflows.Action): network = forms.MultipleChoiceField(label=_("Networks"), required=True, widget=forms.CheckboxSelectMultiple(), error_messages={ 'required': _( "At least one network must" " be specified.")}, help_text=_("Launch instance with" " these networks")) class Meta: name = _("Networking") permissions = ('openstack.services.network',) help_text = _("Select networks for your instance.") def populate_network_choices(self, request, context): try: tenant_id = self.request.user.tenant_id networks = api.quantum.network_list_for_tenant(request, tenant_id) for n in networks: n.set_id_as_name_if_empty() network_list = [(network.id, network.name) for network in networks] except: network_list = [] exceptions.handle(request, _('Unable to retrieve networks.')) return 
network_list class SetNetwork(workflows.Step): action_class = SetNetworkAction template_name = "project/instances/_update_networks.html" contributes = ("network_id",) def contribute(self, data, context): if data: networks = self.workflow.request.POST.getlist("network") # If no networks are explicitly specified, network list # contains an empty string, so remove it. networks = [n for n in networks if n != ''] if networks: context['network_id'] = networks return context class LaunchInstance(workflows.Workflow): slug = "launch_instance" name = _("Launch Instance") finalize_button_name = _("Launch") success_message = _('Launched %(count)s named "%(name)s".') failure_message = _('Unable to launch %(count)s named "%(name)s".') success_url = "horizon:project:instances:index" default_steps = (SelectProjectUser, SetInstanceDetails, SetAccessControls, SetNetwork, VolumeOptions, PostCreationStep) def format_status_message(self, message): name = self.context.get('name', 'unknown instance') count = self.context.get('count', 1) if int(count) > 1: return message % {"count": _("%s instances") % count, "name": name} else: return message % {"count": _("instance"), "name": name} @sensitive_variables('context') def handle(self, request, context): custom_script = context.get('customization_script', '') # Determine volume mapping options if context.get('volume_type', None): if(context['delete_on_terminate']): del_on_terminate = 1 else: del_on_terminate = 0 mapping_opts = ("%s::%s" % (context['volume_id'], del_on_terminate)) dev_mapping = {context['device_name']: mapping_opts} else: dev_mapping = None netids = context.get('network_id', None) if netids: nics = [{"net-id": netid, "v4-fixed-ip": ""} for netid in netids] else: nics = None avail_zone = context.get('availability_zone', None) try: api.nova.server_create(request, context['name'], context['source_id'], context['flavor'], context['keypair_id'], normalize_newlines(custom_script), context['security_group_ids'], dev_mapping, 
nics=nics, availability_zone=avail_zone, instance_count=int(context['count']), admin_pass=context['admin_pass']) return True except: exceptions.handle(request) return False
{ "content_hash": "197588b4291324c975589f273e77dd50", "timestamp": "", "source": "github", "line_count": 525, "max_line_length": 79, "avg_line_length": 42.245714285714286, "alnum_prop": 0.5212137607646873, "repo_name": "fajoy/horizon-example", "id": "fce4c8fd21ee20595cd63af9ef1533ec17d507d5", "size": "22988", "binary": false, "copies": "1", "ref": "refs/heads/example", "path": "openstack_dashboard/dashboards/project/instances/workflows/create_instance.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
from django import forms
from django.forms import Select
from django.utils.translation import ugettext_lazy as _

from cms.models import Page

from utils import get_site_languages
from models import TranslationRequest


class AddTranslationForm(forms.ModelForm):
    """Request a translation of selected CMS pages between two languages."""

    class Meta:
        model = TranslationRequest
        fields = [
            # 'from_lang', 'to_lang', added dynamically in __init__()
            'provider',
            'copy_content',
            'pages',
            'all_static_placeholders',
        ]
        widgets = {
            'from_lang': Select,
            'to_lang': Select,
        }

    def __init__(self, *args, **kwargs):
        super(AddTranslationForm, self).__init__(*args, **kwargs)
        self.fields['from_lang'] = forms.ChoiceField(
            choices=self.build_lang_choices())
        self.fields['to_lang'] = forms.ChoiceField(
            choices=self.build_lang_choices())

        # Display page choices indented by tree level ('+' per level).
        # Fetch all levels in one query instead of one Page.objects.get()
        # per choice, which was an N+1 query pattern.
        page_ids = [choice[0] for choice in self.fields['pages'].choices]
        # Keys are stringified because choice values may be strings while
        # the database returns integer pks.
        levels = {str(pk): level
                  for pk, level in Page.objects.filter(
                      pk__in=page_ids).values_list('pk', 'level')}
        self.fields['pages'].choices = [
            (value, '%s %s' % ('+' * levels[str(value)], label))
            for value, label in self.fields['pages'].choices]

    def build_lang_choices(self):
        """Return (code, localized name) pairs for the site's languages."""
        return [(lang['code'], _(lang['name']))
                for lang in get_site_languages()]

    def clean(self):
        """Reject requests where source and target language are identical."""
        cleaned_data = super(AddTranslationForm, self).clean()
        if cleaned_data.get('from_lang') == cleaned_data.get('to_lang'):
            msg = _('Please select two different languages')
            # Flag both fields; only the target carries the message so the
            # error is not shown twice.
            self._errors['from_lang'] = self.error_class([''])
            self._errors['to_lang'] = self.error_class([msg])
        return cleaned_data


class SelectPluginsByTypeForm(forms.Form):
    """Let the user pick which plugin types to include, all on by default."""
    plugins = None

    def __init__(self, *args, **kwargs):
        plugins = kwargs.pop('plugins')
        super(SelectPluginsByTypeForm, self).__init__(*args, **kwargs)
        # TODO: Get real name of plugin instead of class
        # But since we store the plugin as string we can't do that with
        # plugin.get_plugin_name().capitalize(); we might have to do
        # something like getattr(sys.modules[__name__], p['Context']) and
        # getattr(p['Context'], 'class_name').
        choices = [("%s" % plugin, "%s (%sx)" % (plugin, count))
                   for plugin, count in plugins.items()]
        self.fields['plugins'] = forms.MultipleChoiceField(
            widget=forms.CheckboxSelectMultiple,
            choices=choices,
            initial=[c[0] for c in choices],
            required=False
        )
{ "content_hash": "9f2d33cbc504d43653638cf0c819f07c", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 105, "avg_line_length": 37.92857142857143, "alnum_prop": 0.5917137476459511, "repo_name": "aldryn/aldryn-translator", "id": "1df16179ea964c49c689e035baf52aee61a8d440", "size": "2679", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "aldryn_translator/forms.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "49107" } ], "symlink_target": "" }
import os
from json import (
    dumps,
    loads,
)

from azure.common import (
    AzureException,
)
from cryptography.hazmat.primitives.padding import PKCS7

from azure.storage.common._common_conversion import (
    _encode_base64,
    _decode_base64_to_bytes
)
from azure.storage.common._encryption import (
    _generate_encryption_data_dict,
    _dict_to_encryption_data,
    _generate_AES_CBC_cipher,
    _validate_and_unwrap_cek,
    _EncryptionAlgorithm,
)
from azure.storage.common._error import (
    _ERROR_DECRYPTION_FAILURE,
    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
    _validate_not_none,
    _validate_key_encryption_key_wrap,
)
from ._error import (
    _ERROR_MESSAGE_NOT_ENCRYPTED
)


def _encrypt_queue_message(message, key_encryption_key):
    '''
    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
    Returns a json-formatted string containing the encrypted message and the encryption metadata.

    :param object message:
        The plain text message to be encrypted.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
        get_kid()--returns a string key id for this key-encryption-key.
    :return: A json-formatted string containing the encrypted message and the encryption metadata.
    :rtype: str
    '''

    _validate_not_none('message', message)
    _validate_not_none('key_encryption_key', key_encryption_key)
    _validate_key_encryption_key_wrap(key_encryption_key)

    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
    content_encryption_key = os.urandom(32)
    initialization_vector = os.urandom(16)

    # Queue encoding functions all return unicode strings, and encryption should
    # operate on binary strings.
    message = message.encode('utf-8')

    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)

    # PKCS7 with 16 byte blocks ensures compatibility with AES.
    padder = PKCS7(128).padder()
    padded_data = padder.update(message) + padder.finalize()

    # Encrypt the data.
    encryptor = cipher.encryptor()
    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()

    # Build the dictionary structure.
    queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data),
                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
                                                                      content_encryption_key,
                                                                      initialization_vector)}

    return dumps(queue_message)


def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver):
    '''
    Returns the decrypted message contents from an EncryptedQueueMessage.
    If no encryption metadata is present, will return the unaltered message.

    :param str message:
        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
    :param bool require_encryption:
        If set, will enforce that the retrieved messages are encrypted and decrypt them.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the
        string-specified algorithm.
        get_kid()--returns a string key id for this key-encryption-key.
    :param function resolver(kid):
        The user-provided key resolver. Uses the kid string to return a key-encryption-key
        implementing the interface defined above.
    :return: The plain text message from the queue message.
    :rtype: str
    '''
    try:
        # Parse into a separate local so the failure path below can return
        # the caller's original string. Rebinding `message` here previously
        # made the KeyError path return the parsed JSON object instead of
        # the unaltered str the docstring promises.
        parsed = loads(message)

        encryption_data = _dict_to_encryption_data(parsed['EncryptionData'])
        decoded_data = _decode_base64_to_bytes(parsed['EncryptedMessageContents'])
    except (KeyError, ValueError):
        # Message was not json formatted and so was not encrypted
        # or the user provided a json formatted message.
        if require_encryption:
            raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED)

        return message
    try:
        return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
    except Exception:
        # Deliberately mask the underlying error so no information about
        # the failure mode leaks to the caller.
        raise AzureException(_ERROR_DECRYPTION_FAILURE)


def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
    '''
    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
    Returns the original plaintext.

    :param str message:
        The ciphertext to be decrypted.
    :param _EncryptionData encryption_data:
        The metadata associated with this ciphertext.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the
        string-specified algorithm.
        get_kid()--returns a string key id for this key-encryption-key.
    :param function resolver(kid):
        The user-provided key resolver. Uses the kid string to return a key-encryption-key
        implementing the interface defined above.
    :return: The decrypted plaintext.
    :rtype: str
    '''
    _validate_not_none('message', message)
    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)

    if not (_EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm):
        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)

    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)

    # Decrypt, then strip the PKCS7 padding in a single pass each.
    decryptor = cipher.decryptor()
    decrypted_data = decryptor.update(message) + decryptor.finalize()

    unpadder = PKCS7(128).unpadder()
    return unpadder.update(decrypted_data) + unpadder.finalize()
{ "content_hash": "c3e75b3573a9ce70c0521510b01c9143", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 132, "avg_line_length": 41.947712418300654, "alnum_prop": 0.7050483016516048, "repo_name": "Azure/azure-storage-python", "id": "08c206560ca6b728b06cd74c695227fd4e8663c3", "size": "6729", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "azure-storage-queue/azure/storage/queue/_encryption.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "779" }, { "name": "Python", "bytes": "1674801" }, { "name": "Shell", "bytes": "168" } ], "symlink_target": "" }
from datetime import datetime, timezone

from django import forms
from django.test import TestCase

from debug_toolbar.forms import SignedDataForm

SIGNATURE = "-WiogJKyy4E8Om00CrFSy0T6XHObwBa6Zb46u-vmeYE"
DATA = {"date": datetime(2020, 1, 1, tzinfo=timezone.utc), "value": "foo"}
SIGNED_DATA = f'{{"date": "2020-01-01 00:00:00+00:00", "value": "foo"}}:{SIGNATURE}'


class FooForm(forms.Form):
    value = forms.CharField()
    # Include a datetime in the tests because it's not serializable back
    # to a datetime by SignedDataForm
    date = forms.DateTimeField()


class TestSignedDataForm(TestCase):
    def test_signed_data(self):
        """Signing DATA validates and produces the known-good signature."""
        payload = {"signed": SignedDataForm.sign(DATA)}
        bound_form = SignedDataForm(data=payload)
        self.assertTrue(bound_form.is_valid())
        # Check the signature value
        self.assertEqual(payload["signed"], SIGNED_DATA)

    def test_verified_data(self):
        """verified_data() round-trips through FooForm validation."""
        bound_form = SignedDataForm(data={"signed": SignedDataForm.sign(DATA)})
        expected = {
            "value": "foo",
            "date": "2020-01-01 00:00:00+00:00",
        }
        self.assertEqual(bound_form.verified_data(), expected)
        # Take it back to the foo form to validate the datetime is serialized
        foo_form = FooForm(data=bound_form.verified_data())
        self.assertTrue(foo_form.is_valid())
        self.assertDictEqual(foo_form.cleaned_data, DATA)

    def test_initial_set_signed(self):
        """Passing initial data pre-signs it into the 'signed' field."""
        self.assertEqual(SignedDataForm(initial=DATA).initial["signed"],
                         SIGNED_DATA)

    def test_prevents_tampering(self):
        """Altering the signed payload invalidates the form."""
        tampered = SIGNED_DATA.replace('"value": "foo"', '"value": "bar"')
        self.assertFalse(SignedDataForm(data={"signed": tampered}).is_valid())
{ "content_hash": "336b3d2f817c2243f6300c974e65a22f", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 84, "avg_line_length": 34.9, "alnum_prop": 0.6418338108882522, "repo_name": "tim-schilling/django-debug-toolbar", "id": "a619ae89d466f6ba0aedf656fc724cad4d9a53a7", "size": "1745", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "tests/test_forms.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "12475" }, { "name": "HTML", "bytes": "32325" }, { "name": "JavaScript", "bytes": "25502" }, { "name": "Jinja", "bytes": "88" }, { "name": "Makefile", "bytes": "1161" }, { "name": "Python", "bytes": "253285" } ], "symlink_target": "" }
import urllib
import os
import collections
import sys

# Module-level accumulators shared by the functions below.
totals = {}               # start_date -> average % change across symbols
date_ranges = {}          # start_date -> end_date of the window
data = collections.defaultdict(dict)  # start_date -> {symbol: % change}
file_output = []          # lines buffered for out.txt


def printsymbols(sym_totals, output):
    """Append one 'SYMBOL change%' line per symbol, best performer first.

    Returns the extended output string; a blank line terminates the group.
    """
    # .items() (not .iteritems()) works identically here on Python 2 and 3,
    # and sorting by the tuple's value avoids a redundant dict lookup.
    for symbol, changes in reversed(sorted(sym_totals.items(),
                                           key=lambda item: item[1])):
        output += "%s %s%%\n" % (symbol, changes)
    output += "\n"
    return output


def decode_files(window, symbol_list):
    """Parse each downloaded <sym>.csv and record windowed % changes.

    For every sliding window of `window` weekly rows, stores the percent
    change in `data[start_date][sym]` and the window's end date in
    `date_ranges[start_date]`. Column 6 is the adjusted close price.
    """
    global data, date_ranges
    for sym in symbol_list:
        # Context manager guarantees the CSV handle is closed.
        with open("%s.csv" % sym) as sym_file:
            lines = sym_file.readlines()
        for start_row in range(1, len(lines) - window - 1):
            new_window = window + start_row
            end_cols = lines[start_row].split(",")
            end_date = int(end_cols[0].replace("-", ""))
            end_price = float(end_cols[6])
            start_cols = lines[new_window].split(",")
            start_date = int(start_cols[0].replace("-", ""))
            start_price = float(start_cols[6])
            data[start_date][sym] = round(
                (end_price - start_price) / start_price * 100, 2)
            date_ranges[start_date] = end_date


def calculate_performance(window, symbol_list):
    """Download weekly quotes, score every common window, and report.

    Returns a formatted report of the highest, lowest and latest windows;
    also appends per-window lines and the report to `file_output`.
    """
    global totals, file_output
    for symbol in symbol_list:
        url = ("http://ichart.finance.yahoo.com/table.csv?s=%s&g=w"
               % symbol.upper())
        if os.path.isfile("%s.csv" % symbol):
            os.remove("%s.csv" % symbol)
        urllib.urlretrieve(url, "%s.csv" % symbol)

    decode_files(window, symbol_list)

    for date in sorted(date_ranges.keys()):
        # Only score windows in which every requested symbol traded
        # (membership test on the dict directly, not on .keys()).
        if not all(s in data[date] for s in symbol_list):
            continue
        total = sum(data[date].values()) / len(symbol_list)
        totals[date] = total
        file_output.append("%d - %d %.2f%%\n"
                           % (date, date_ranges[date], total))

    if not totals:
        sys.exit("These stocks never sold at the same time!")

    highest_date = max(totals.keys(), key=(lambda k: totals[k]))
    highest = "%s - %s %.2f%%" % (highest_date, date_ranges[highest_date],
                                  totals[highest_date])
    lowest_date = min(totals.keys(), key=(lambda k: totals[k]))
    lowest = "%s - %s %.2f%%" % (lowest_date, date_ranges[lowest_date],
                                 totals[lowest_date])
    # NOTE(review): latest_date comes from date_ranges, so totals may lack
    # it when the latest window was skipped above — TODO confirm intent.
    latest_date = sorted(date_ranges.keys())[-1]
    latest = "%s - %s %.2f%%" % (latest_date, date_ranges[latest_date],
                                 totals[latest_date])

    output = ""
    output += "\n\nHIGHEST: %s\n" % highest
    output = printsymbols(data[highest_date], output)
    output += "LOWEST: %s\n" % lowest
    output = printsymbols(data[lowest_date], output)
    output += "LATEST: %s\n" % latest
    output = printsymbols(data[latest_date], output)
    file_output.append(output)

    # Clean up the downloaded CSVs.
    for symbol in symbol_list:
        if os.path.isfile("%s.csv" % symbol):
            os.remove("%s.csv" % symbol)
    return output


def main():
    """Prompt for a period and symbols, then print and save the report."""
    timeperiod = float(raw_input("Time Period in Months: "))
    # Convert months to a whole number of weekly rows.
    window = int(round(timeperiod / 12 * 365 / 7))
    symbols = raw_input("Symbols: ")
    symbol_list = [symbol.lower().strip() for symbol in symbols.split(",")
                   if " " not in symbol.strip()]
    output = calculate_performance(window, symbol_list)
    print(output)
    # 'with' replaces the manual open/flush/close and guarantees the
    # report file is closed even if writing fails.
    with open("out.txt", 'w+') as out:
        out.write(''.join(file_output))


if __name__ == "__main__":
    main()
{ "content_hash": "0360d4920cc78fc5ef3e28f3277965e5", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 102, "avg_line_length": 35.46067415730337, "alnum_prop": 0.6181875792141952, "repo_name": "bestvibes/stock-portfolio-predictions", "id": "e816110a569934952ad4f2e2769db3e1a6d1d1e6", "size": "3156", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "stock_script.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "3156" } ], "symlink_target": "" }
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class nsip6(base_resource) : """ Configuration for ip6 resource. """ def __init__(self) : self._ipv6address = "" self._scope = "" self._type = "" self._vlan = 0 self._nd = "" self._icmp = "" self._vserver = "" self._telnet = "" self._ftp = "" self._gui = "" self._ssh = "" self._snmp = "" self._mgmtaccess = "" self._restrictaccess = "" self._dynamicrouting = "" self._hostroute = "" self._ip6hostrtgw = "" self._metric = 0 self._vserverrhilevel = "" self._ospf6lsatype = "" self._ospfarea = 0 self._state = "" self._map = "" self._ownernode = 0 self._td = 0 self._iptype = [] self._curstate = "" self._viprtadv2bsd = False self._vipvsercount = 0 self._vipvserdowncount = 0 self._systemtype = "" self.___count = 0 @property def ipv6address(self) : """IPv6 address to create on the NetScaler appliance.<br/>Minimum length = 1. """ try : return self._ipv6address except Exception as e: raise e @ipv6address.setter def ipv6address(self, ipv6address) : """IPv6 address to create on the NetScaler appliance.<br/>Minimum length = 1 """ try : self._ipv6address = ipv6address except Exception as e: raise e @property def scope(self) : """Scope of the IPv6 address to be created. Cannot be changed after the IP address is created.<br/>Default value: global<br/>Possible values = global, link-local. """ try : return self._scope except Exception as e: raise e @scope.setter def scope(self, scope) : """Scope of the IPv6 address to be created. 
Cannot be changed after the IP address is created.<br/>Default value: global<br/>Possible values = global, link-local """ try : self._scope = scope except Exception as e: raise e @property def type(self) : """Type of IP address to be created on the NetScaler appliance. Cannot be changed after the IP address is created.<br/>Default value: SNIP<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP. """ try : return self._type except Exception as e: raise e @type.setter def type(self, type) : """Type of IP address to be created on the NetScaler appliance. Cannot be changed after the IP address is created.<br/>Default value: SNIP<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP """ try : self._type = type except Exception as e: raise e @property def vlan(self) : """The VLAN number.<br/>Default value: 0<br/>Maximum length = 4094. """ try : return self._vlan except Exception as e: raise e @vlan.setter def vlan(self, vlan) : """The VLAN number.<br/>Default value: 0<br/>Maximum length = 4094 """ try : self._vlan = vlan except Exception as e: raise e @property def nd(self) : """Respond to Neighbor Discovery (ND) requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._nd except Exception as e: raise e @nd.setter def nd(self, nd) : """Respond to Neighbor Discovery (ND) requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._nd = nd except Exception as e: raise e @property def icmp(self) : """Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. 
""" try : return self._icmp except Exception as e: raise e @icmp.setter def icmp(self, icmp) : """Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._icmp = icmp except Exception as e: raise e @property def vserver(self) : """Enable or disable the state of all the virtual servers associated with this VIP6 address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._vserver except Exception as e: raise e @vserver.setter def vserver(self, vserver) : """Enable or disable the state of all the virtual servers associated with this VIP6 address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._vserver = vserver except Exception as e: raise e @property def telnet(self) : """Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._telnet except Exception as e: raise e @telnet.setter def telnet(self, telnet) : """Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._telnet = telnet except Exception as e: raise e @property def ftp(self) : """Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._ftp except Exception as e: raise e @ftp.setter def ftp(self, ftp) : """Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._ftp = ftp except Exception as e: raise e @property def gui(self) : """Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED. 
""" try : return self._gui except Exception as e: raise e @gui.setter def gui(self, gui) : """Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED """ try : self._gui = gui except Exception as e: raise e @property def ssh(self) : """Allow secure Shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._ssh except Exception as e: raise e @ssh.setter def ssh(self, ssh) : """Allow secure Shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._ssh = ssh except Exception as e: raise e @property def snmp(self) : """Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._snmp except Exception as e: raise e @snmp.setter def snmp(self, snmp) : """Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._snmp = snmp except Exception as e: raise e @property def mgmtaccess(self) : """Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._mgmtaccess except Exception as e: raise e @mgmtaccess.setter def mgmtaccess(self, mgmtaccess) : """Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED """ try : self._mgmtaccess = mgmtaccess except Exception as e: raise e @property def restrictaccess(self) : """Block access to nonmanagement applications on this IP address. This option is applicable forMIP6s, SNIP6s, and NSIP6s, and is disabled by default. 
Nonmanagement applications can run on the underlying NetScaler Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._restrictaccess except Exception as e: raise e @restrictaccess.setter def restrictaccess(self, restrictaccess) : """Block access to nonmanagement applications on this IP address. This option is applicable forMIP6s, SNIP6s, and NSIP6s, and is disabled by default. Nonmanagement applications can run on the underlying NetScaler Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED """ try : self._restrictaccess = restrictaccess except Exception as e: raise e @property def dynamicrouting(self) : """Allow dynamic routing on this IP address. Specific to Subnet IPv6 (SNIP6) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._dynamicrouting except Exception as e: raise e @dynamicrouting.setter def dynamicrouting(self, dynamicrouting) : """Allow dynamic routing on this IP address. Specific to Subnet IPv6 (SNIP6) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED """ try : self._dynamicrouting = dynamicrouting except Exception as e: raise e @property def hostroute(self) : """Advertise a route for the VIP6 address by using the dynamic routing protocols running on the NetScaler appliance.<br/>Possible values = ENABLED, DISABLED. """ try : return self._hostroute except Exception as e: raise e @hostroute.setter def hostroute(self, hostroute) : """Advertise a route for the VIP6 address by using the dynamic routing protocols running on the NetScaler appliance.<br/>Possible values = ENABLED, DISABLED """ try : self._hostroute = hostroute except Exception as e: raise e @property def ip6hostrtgw(self) : """IPv6 address of the gateway for the route. If Gateway is not set, VIP uses :: as the gateway.<br/>Default value: 0. 
""" try : return self._ip6hostrtgw except Exception as e: raise e @ip6hostrtgw.setter def ip6hostrtgw(self, ip6hostrtgw) : """IPv6 address of the gateway for the route. If Gateway is not set, VIP uses :: as the gateway.<br/>Default value: 0 """ try : self._ip6hostrtgw = ip6hostrtgw except Exception as e: raise e @property def metric(self) : """Integer value to add to or subtract from the cost of the route advertised for the VIP6 address.<br/>Minimum length = -16777215. """ try : return self._metric except Exception as e: raise e @metric.setter def metric(self, metric) : """Integer value to add to or subtract from the cost of the route advertised for the VIP6 address.<br/>Minimum length = -16777215 """ try : self._metric = metric except Exception as e: raise e @property def vserverrhilevel(self) : """Advertise or do not advertise the route for the Virtual IP (VIP6) address on the basis of the state of the virtual servers associated with that VIP6. * NONE - Advertise the route for the VIP6 address, irrespective of the state of the virtual servers associated with the address. * ONE VSERVER - Advertise the route for the VIP6 address if at least one of the associated virtual servers is in UP state. * ALL VSERVER - Advertise the route for the VIP6 address if all of the associated virtual servers are in UP state. * VSVR_CNTRLD. Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states. When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address: * If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address. 
* If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state. *If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD. """ try : return self._vserverrhilevel except Exception as e: raise e @vserverrhilevel.setter def vserverrhilevel(self, vserverrhilevel) : """Advertise or do not advertise the route for the Virtual IP (VIP6) address on the basis of the state of the virtual servers associated with that VIP6. * NONE - Advertise the route for the VIP6 address, irrespective of the state of the virtual servers associated with the address. * ONE VSERVER - Advertise the route for the VIP6 address if at least one of the associated virtual servers is in UP state. * ALL VSERVER - Advertise the route for the VIP6 address if all of the associated virtual servers are in UP state. * VSVR_CNTRLD. Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states. When Vserver RHI Level (RHI) parameter is set to VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate (RHI STATE) settings on the virtual servers associated with the VIP address: * If you set RHI STATE to PASSIVE on all virtual servers, the NetScaler ADC always advertises the route for the VIP address. * If you set RHI STATE to ACTIVE on all virtual servers, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers is in UP state. 
*If you set RHI STATE to ACTIVE on some and PASSIVE on others, the NetScaler ADC advertises the route for the VIP address if at least one of the associated virtual servers, whose RHI STATE set to ACTIVE, is in UP state.<br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD """ try : self._vserverrhilevel = vserverrhilevel except Exception as e: raise e @property def ospf6lsatype(self) : """Type of LSAs to be used by the IPv6 OSPF protocol, running on the NetScaler appliance, for advertising the route for the VIP6 address.<br/>Default value: EXTERNAL<br/>Possible values = INTRA_AREA, EXTERNAL. """ try : return self._ospf6lsatype except Exception as e: raise e @ospf6lsatype.setter def ospf6lsatype(self, ospf6lsatype) : """Type of LSAs to be used by the IPv6 OSPF protocol, running on the NetScaler appliance, for advertising the route for the VIP6 address.<br/>Default value: EXTERNAL<br/>Possible values = INTRA_AREA, EXTERNAL """ try : self._ospf6lsatype = ospf6lsatype except Exception as e: raise e @property def ospfarea(self) : """ID of the area in which the Intra-Area-Prefix LSAs are to be advertised for the VIP6 address by the IPv6 OSPF protocol running on the NetScaler appliance. When ospfArea is not set, VIP6 is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU. """ try : return self._ospfarea except Exception as e: raise e @ospfarea.setter def ospfarea(self, ospfarea) : """ID of the area in which the Intra-Area-Prefix LSAs are to be advertised for the VIP6 address by the IPv6 OSPF protocol running on the NetScaler appliance. When ospfArea is not set, VIP6 is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU """ try : self._ospfarea = ospfarea except Exception as e: raise e @property def state(self) : """Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED. 
""" try : return self._state except Exception as e: raise e @state.setter def state(self, state) : """Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED """ try : self._state = state except Exception as e: raise e @property def map(self) : """Mapped IPV4 address for the IPV6 address. """ try : return self._map except Exception as e: raise e @map.setter def map(self, map) : """Mapped IPV4 address for the IPV6 address. """ try : self._map = map except Exception as e: raise e @property def ownernode(self) : """ID of the cluster node for which you are adding the IP address. Must be used if you want the IP address to be active only on the specific node. Can be configured only through the cluster IP address. Cannot be changed after the IP address is created.<br/>Default value: 255. """ try : return self._ownernode except Exception as e: raise e @ownernode.setter def ownernode(self, ownernode) : """ID of the cluster node for which you are adding the IP address. Must be used if you want the IP address to be active only on the specific node. Can be configured only through the cluster IP address. Cannot be changed after the IP address is created.<br/>Default value: 255 """ try : self._ownernode = ownernode except Exception as e: raise e @property def td(self) : """Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094. """ try : return self._td except Exception as e: raise e @td.setter def td(self, td) : """Integer value that uniquely identifies the traffic domain in which you want to configure the entity. 
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094 """ try : self._td = td except Exception as e: raise e @property def iptype(self) : """The type of the IPv6 address.<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP. """ try : return self._iptype except Exception as e: raise e @property def curstate(self) : """Current state of this IP.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED. """ try : return self._curstate except Exception as e: raise e @property def viprtadv2bsd(self) : """Whether this route is advertised to FreeBSD. """ try : return self._viprtadv2bsd except Exception as e: raise e @property def vipvsercount(self) : """Number of vservers bound to this VIP. """ try : return self._vipvsercount except Exception as e: raise e @property def vipvserdowncount(self) : """Number of vservers bound to this VIP, which are down. """ try : return self._vipvserdowncount except Exception as e: raise e @property def systemtype(self) : """The type of the System. Possible Values: Standalone, HA, Cluster. Used for display purpose.<br/>Possible values = Stand-alone, HA, Cluster. """ try : return self._systemtype except Exception as e: raise e def _get_nitro_response(self, service, response) : """ converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(nsip6_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.nsip6 except Exception as e : raise e def _get_object_name(self) : """ Returns the value of object identifier argument """ try : if (self.ipv6address) : return str(self.ipv6address) return None except Exception as e : raise e @classmethod def add(cls, client, resource) : """ Use this API to add nsip6. """ try : if type(resource) is not list : addresource = nsip6() addresource.ipv6address = resource.ipv6address addresource.scope = resource.scope addresource.type = resource.type addresource.vlan = resource.vlan addresource.nd = resource.nd addresource.icmp = resource.icmp addresource.vserver = resource.vserver addresource.telnet = resource.telnet addresource.ftp = resource.ftp addresource.gui = resource.gui addresource.ssh = resource.ssh addresource.snmp = resource.snmp addresource.mgmtaccess = resource.mgmtaccess addresource.restrictaccess = resource.restrictaccess addresource.dynamicrouting = resource.dynamicrouting addresource.hostroute = resource.hostroute addresource.ip6hostrtgw = resource.ip6hostrtgw addresource.metric = resource.metric addresource.vserverrhilevel = resource.vserverrhilevel addresource.ospf6lsatype = resource.ospf6lsatype addresource.ospfarea = resource.ospfarea addresource.state = resource.state addresource.map = resource.map addresource.ownernode = resource.ownernode addresource.td = resource.td return addresource.add_resource(client) else : if (resource and len(resource) > 0) : addresources = [ nsip6() for _ in range(len(resource))] for i in range(len(resource)) : addresources[i].ipv6address = 
resource[i].ipv6address addresources[i].scope = resource[i].scope addresources[i].type = resource[i].type addresources[i].vlan = resource[i].vlan addresources[i].nd = resource[i].nd addresources[i].icmp = resource[i].icmp addresources[i].vserver = resource[i].vserver addresources[i].telnet = resource[i].telnet addresources[i].ftp = resource[i].ftp addresources[i].gui = resource[i].gui addresources[i].ssh = resource[i].ssh addresources[i].snmp = resource[i].snmp addresources[i].mgmtaccess = resource[i].mgmtaccess addresources[i].restrictaccess = resource[i].restrictaccess addresources[i].dynamicrouting = resource[i].dynamicrouting addresources[i].hostroute = resource[i].hostroute addresources[i].ip6hostrtgw = resource[i].ip6hostrtgw addresources[i].metric = resource[i].metric addresources[i].vserverrhilevel = resource[i].vserverrhilevel addresources[i].ospf6lsatype = resource[i].ospf6lsatype addresources[i].ospfarea = resource[i].ospfarea addresources[i].state = resource[i].state addresources[i].map = resource[i].map addresources[i].ownernode = resource[i].ownernode addresources[i].td = resource[i].td result = cls.add_bulk_request(client, addresources) return result except Exception as e : raise e @classmethod def delete(cls, client, resource) : """ Use this API to delete nsip6. 
""" try : if type(resource) is not list : deleteresource = nsip6() if type(resource) != type(deleteresource): deleteresource.ipv6address = resource else : deleteresource.ipv6address = resource.ipv6address deleteresource.td = resource.td return deleteresource.delete_resource(client) else : if type(resource[0]) != cls : if (resource and len(resource) > 0) : deleteresources = [ nsip6() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].ipv6address = resource[i] else : if (resource and len(resource) > 0) : deleteresources = [ nsip6() for _ in range(len(resource))] for i in range(len(resource)) : deleteresources[i].ipv6address = resource[i].ipv6address deleteresources[i].td = resource[i].td result = cls.delete_bulk_request(client, deleteresources) return result except Exception as e : raise e @classmethod def update(cls, client, resource) : """ Use this API to update nsip6. """ try : if type(resource) is not list : updateresource = nsip6() updateresource.ipv6address = resource.ipv6address updateresource.td = resource.td updateresource.nd = resource.nd updateresource.icmp = resource.icmp updateresource.vserver = resource.vserver updateresource.telnet = resource.telnet updateresource.ftp = resource.ftp updateresource.gui = resource.gui updateresource.ssh = resource.ssh updateresource.snmp = resource.snmp updateresource.mgmtaccess = resource.mgmtaccess updateresource.restrictaccess = resource.restrictaccess updateresource.state = resource.state updateresource.map = resource.map updateresource.dynamicrouting = resource.dynamicrouting updateresource.hostroute = resource.hostroute updateresource.ip6hostrtgw = resource.ip6hostrtgw updateresource.metric = resource.metric updateresource.vserverrhilevel = resource.vserverrhilevel updateresource.ospf6lsatype = resource.ospf6lsatype updateresource.ospfarea = resource.ospfarea return updateresource.update_resource(client) else : if (resource and len(resource) > 0) : updateresources = [ nsip6() for _ in 
range(len(resource))] for i in range(len(resource)) : updateresources[i].ipv6address = resource[i].ipv6address updateresources[i].td = resource[i].td updateresources[i].nd = resource[i].nd updateresources[i].icmp = resource[i].icmp updateresources[i].vserver = resource[i].vserver updateresources[i].telnet = resource[i].telnet updateresources[i].ftp = resource[i].ftp updateresources[i].gui = resource[i].gui updateresources[i].ssh = resource[i].ssh updateresources[i].snmp = resource[i].snmp updateresources[i].mgmtaccess = resource[i].mgmtaccess updateresources[i].restrictaccess = resource[i].restrictaccess updateresources[i].state = resource[i].state updateresources[i].map = resource[i].map updateresources[i].dynamicrouting = resource[i].dynamicrouting updateresources[i].hostroute = resource[i].hostroute updateresources[i].ip6hostrtgw = resource[i].ip6hostrtgw updateresources[i].metric = resource[i].metric updateresources[i].vserverrhilevel = resource[i].vserverrhilevel updateresources[i].ospf6lsatype = resource[i].ospf6lsatype updateresources[i].ospfarea = resource[i].ospfarea result = cls.update_bulk_request(client, updateresources) return result except Exception as e : raise e @classmethod def unset(cls, client, resource, args) : """ Use this API to unset the properties of nsip6 resource. Properties that need to be unset are specified in args array. 
""" try : if type(resource) is not list : unsetresource = nsip6() if type(resource) != type(unsetresource): unsetresource.ipv6address = resource else : unsetresource.ipv6address = resource.ipv6address unsetresource.td = resource.td return unsetresource.unset_resource(client, args) else : if type(resource[0]) != cls : if (resource and len(resource) > 0) : unsetresources = [ nsip6() for _ in range(len(resource))] for i in range(len(resource)) : unsetresources[i].ipv6address = resource[i] else : if (resource and len(resource) > 0) : unsetresources = [ nsip6() for _ in range(len(resource))] for i in range(len(resource)) : unsetresources[i].ipv6address = resource[i].ipv6address unsetresources[i].td = resource[i].td result = cls.unset_bulk_request(client, unsetresources, args) return result except Exception as e : raise e @classmethod def get(cls, client, name="", option_="") : """ Use this API to fetch all the nsip6 resources that are configured on netscaler. """ try : if not name : obj = nsip6() response = obj.get_resources(client, option_) else : if type(name) == cls : if type(name) is not list : option_ = options() option_.args = nitro_util.object_to_string_withoutquotes(name) response = name.get_resource(client, option_) else : if name and len(name) > 0 : response = [nsip6() for _ in range(len(name))] for i in range(len(name)) : option_ = options() option_.args = nitro_util.object_to_string_withoutquotes(name[i]) response[i] = name[i].get_resource(client, option_) return response except Exception as e : raise e @classmethod def get_filtered(cls, client, filter_) : """ Use this API to fetch filtered set of nsip6 resources. filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = nsip6() option_ = options() option_.filter = filter_ response = obj.getfiltered(client, option_) return response except Exception as e : raise e @classmethod def count(cls, client) : """ Use this API to count the nsip6 resources configured on NetScaler. 
""" try : obj = nsip6() option_ = options() option_.count = True response = obj.get_resources(client, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e : raise e @classmethod def count_filtered(cls, client, filter_) : """ Use this API to count filtered the set of nsip6 resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = nsip6() option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(client, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e : raise e class Iptype: NSIP = "NSIP" VIP = "VIP" SNIP = "SNIP" GSLBsiteIP = "GSLBsiteIP" ADNSsvcIP = "ADNSsvcIP" CLIP = "CLIP" class Ssh: ENABLED = "ENABLED" DISABLED = "DISABLED" class State: DISABLED = "DISABLED" ENABLED = "ENABLED" class Ospf6lsatype: INTRA_AREA = "INTRA_AREA" EXTERNAL = "EXTERNAL" class Scope: GLOBAL = "global" link_local = "link-local" class Nd: ENABLED = "ENABLED" DISABLED = "DISABLED" class Systemtype: Stand_alone = "Stand-alone" HA = "HA" Cluster = "Cluster" class Gui: ENABLED = "ENABLED" SECUREONLY = "SECUREONLY" DISABLED = "DISABLED" class Dynamicrouting: ENABLED = "ENABLED" DISABLED = "DISABLED" class Type: NSIP = "NSIP" VIP = "VIP" SNIP = "SNIP" GSLBsiteIP = "GSLBsiteIP" ADNSsvcIP = "ADNSsvcIP" CLIP = "CLIP" class Mgmtaccess: ENABLED = "ENABLED" DISABLED = "DISABLED" class Hostroute: ENABLED = "ENABLED" DISABLED = "DISABLED" class Ftp: ENABLED = "ENABLED" DISABLED = "DISABLED" class Vserverrhilevel: ONE_VSERVER = "ONE_VSERVER" ALL_VSERVERS = "ALL_VSERVERS" NONE = "NONE" VSVR_CNTRLD = "VSVR_CNTRLD" class Icmp: ENABLED = "ENABLED" DISABLED = "DISABLED" class Vserver: ENABLED = "ENABLED" DISABLED = "DISABLED" class Snmp: ENABLED = "ENABLED" DISABLED = "DISABLED" class Curstate: DISABLED = "DISABLED" ENABLED = "ENABLED" class Restrictaccess: ENABLED = "ENABLED" DISABLED = "DISABLED" class Telnet: ENABLED = "ENABLED" DISABLED = 
"DISABLED" class nsip6_response(base_response) : def __init__(self, length=1) : self.nsip6 = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.nsip6 = [nsip6() for _ in range(length)]
{ "content_hash": "0ce5f3a494a40ee7fd945cbd8928c60c", "timestamp": "", "source": "github", "line_count": 954, "max_line_length": 320, "avg_line_length": 32.664570230607964, "alnum_prop": 0.7000513445863552, "repo_name": "mahabs/nitro", "id": "b8f33331e2f23c3bf1113fc43127d851c9362cbd", "size": "31776", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsip6.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "498" }, { "name": "Python", "bytes": "10647176" } ], "symlink_target": "" }
from .directory import DirectoryManager from .directory_device import DirectoryDeviceManager from .directory_service import DirectoryServiceManager from .directory_service_auths import DirectoryServiceAuthsManager from .directory_service_policy import DirectoryServicePolicyManager from .directory_service_session import DirectoryServiceSessionManager from .directory_session import DirectorySessionManager from .keys import KeysManager from .organization_service import OrganizationServiceManager from .organization_service_policy import OrganizationServicePolicyManager from .entity_manager import EntityManager from .auth_policy import AuthPolicyManager from .sample_app_device_manager import SampleAppDeviceManager from .kobiton_manager import KobitonManager from .appium_device_manager import AppiumDeviceManager from .directory_totp import DirectoryTOTPManager
{ "content_hash": "bb4f3fcce9cba35612f412c74571759a", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 73, "avg_line_length": 54.1875, "alnum_prop": 0.8835063437139562, "repo_name": "LaunchKey/launchkey-python", "id": "e35ee5f9c45670b0d588e2be15e34cbde410cabe", "size": "867", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "features/steps/managers/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "32510" } ], "symlink_target": "" }
from pipeline.storage import default_storage
from django import template
from django.template.loader import render_to_string

from pipeline.conf import settings
from pipeline.packager import Packager, PackageNotFound
from pipeline.utils import guess_type, path_is_url

register = template.Library()


class CommonNode(template.Node):
    """Base node for the ``compressed_css``/``compressed_js`` template tags.

    Subclasses set ``self.packages`` (the package definitions to search) and
    ``self.type`` (``'css'`` or ``'js'``), and bind ``render_type_specific``
    to the method that renders one tag for one path.
    """

    def __init__(self, name):
        self.name = name

    def render(self, context):
        """Render the named package, or ``''`` if it does not exist."""
        package_name = template.Variable(self.name).resolve(context)
        package = self.packages.get(package_name, {})
        if package:
            package = {package_name: package}
        self.packager = Packager(css_packages=package, js_packages=package)
        try:
            package = self.packager.package_for(self.type, package_name)
        except PackageNotFound:
            # Fail silently: an invalid group name renders as nothing.
            return ''
        if settings.PIPELINE:
            # Compressed mode: a single tag for the packaged output file.
            return self.render_type_specific(package, package.output_filename)
        else:
            # Debug mode: compile the sources and emit one tag per file.
            paths = self.packager.compile(package.paths)
            return self.render_individual(package, paths)

    def render_individual(self, package, paths):
        """Render one tag per source path, joined by newlines."""
        tags = [self.render_type_specific(package, path) for path in paths]
        return '\n'.join(tags)

    def get_url(self, path):
        """Return *path* unchanged if already a URL, else its storage URL."""
        if path_is_url(path):
            return path
        return default_storage.url(path)


class CompressedCSSNode(CommonNode):
    """Node rendering a CSS package defined in ``PIPELINE_CSS``."""

    def __init__(self, name):
        super(CompressedCSSNode, self).__init__(name)
        self.packages = settings.PIPELINE_CSS
        self.type = 'css'

    def render_css(self, package, path):
        """Render one CSS tag for *path* via the package's template."""
        template_name = package.template_name or "pipeline/css.html"
        context = package.extra_context
        context.update({
            'type': guess_type(path, 'text/css'),
            'url': self.get_url(path)
        })
        return render_to_string(template_name, context)

    render_type_specific = render_css


class CompressedJSNode(CommonNode):
    """Node rendering a JS package defined in ``PIPELINE_JS``."""

    def __init__(self, name):
        super(CompressedJSNode, self).__init__(name)
        self.packages = settings.PIPELINE_JS
        self.type = 'js'

    def render_js(self, package, path):
        """Render one script tag for *path* via the package's template."""
        template_name = package.template_name or "pipeline/js.html"
        context = package.extra_context
        context.update({
            'type': guess_type(path, 'text/javascript'),
            'url': self.get_url(path)
        })
        return render_to_string(template_name, context)

    render_type_specific = render_js

    def render_inline(self, package, js):
        """Render *js* inside an inline <script> block."""
        context = package.extra_context
        context.update({
            'source': js
        })
        return render_to_string("pipeline/inline_js.html", context)

    def render_individual(self, package, paths, templates=None):
        # NOTE(review): extends the parent signature with *templates*
        # (inline JS appended after the per-file tags).
        tags = []
        if templates:
            tags.append(self.render_inline(package, templates))
        return super(CompressedJSNode, self).render_individual(package, paths) + '\n'.join(tags)


def common_compressed(parser, token, node_class):
    """Parse a ``{% compressed_* %}`` tag taking exactly one argument."""
    try:
        tag_name, name = token.split_contents()
    except ValueError:
        # Fix: the original used Python 2 ``raise E, msg`` statement syntax,
        # which is a SyntaxError on Python 3; raise via a constructor call.
        raise template.TemplateSyntaxError('%r requires exactly one argument: the name of a group in the PIPELINE_CSS setting' % token.split_contents()[0])
    return node_class(name)


@register.tag
def compressed_css(parser, token):
    """``{% compressed_css "group" %}`` — render a CSS package."""
    return common_compressed(parser, token, CompressedCSSNode)


@register.tag
def compressed_js(parser, token):
    """``{% compressed_js "group" %}`` — render a JS package."""
    return common_compressed(parser, token, CompressedJSNode)
{ "content_hash": "2de4f096499103fd86bfa71767b75832", "timestamp": "", "source": "github", "line_count": 104, "max_line_length": 155, "avg_line_length": 34.31730769230769, "alnum_prop": 0.6534043149341552, "repo_name": "fahhem/django-pipeline", "id": "6d298ade757fcdc022809f126c9f942b8c3d1fca", "size": "3569", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pipeline/templatetags/compressed.py", "mode": "33188", "license": "mit", "language": [ { "name": "CoffeeScript", "bytes": "52" }, { "name": "JavaScript", "bytes": "140" }, { "name": "Python", "bytes": "62920" } ], "symlink_target": "" }
from swgpy.object import *


def create(kernel):
    """Construct the shared Tatooine style-01 filler building (variant 06).

    Returns a Building populated with its IFF template path, attribute
    template id, and STF display name.
    """
    building = Building()

    building.template = "object/building/tatooine/shared_filler_building_tatt_style01_06.iff"
    building.attribute_template_id = -1
    building.stfName("building_name","filler_building_tatt_style01_06")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return building
{ "content_hash": "f36778afab5438a0165a095f86a7e50c", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 88, "avg_line_length": 26.46153846153846, "alnum_prop": 0.7151162790697675, "repo_name": "obi-two/Rebelion", "id": "04ce82a8ccffc35d48fad799f46031ce728ea86d", "size": "489", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "data/scripts/templates/object/building/tatooine/shared_filler_building_tatt_style01_06.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11818" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2293610" }, { "name": "CMake", "bytes": "39727" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7499185" }, { "name": "SQLPL", "bytes": "41864" } ], "symlink_target": "" }
from django.conf import settings
from django.utils.translation import gettext as _


# TODO: can we use the builtin Django validator instead?
# see:
# https://docs.djangoproject.com/en/4.0/ref/validators/#fileextensionvalidator
def validate_file_extension(value):
    """Reject uploaded files whose extension is not in the allowed list.

    :param value: an uploaded-file object exposing a ``name`` attribute.
    :raises ValidationError: when the (lowercased) extension is neither in
        the allowed list nor empty.
    """
    # Local imports kept as in the original (presumably to avoid import
    # cycles at module load time — TODO confirm).
    from django.core.exceptions import ValidationError
    import os

    # splitext[0] is path+filename, [1] is the extension including the dot.
    # Lowercase once instead of on every comparison.
    ext = os.path.splitext(value.name)[1].lower()

    # TODO: we might improve this with more thorough checks of file types
    # rather than just the extensions.

    # Use VALID_EXTENSIONS from settings.py when defined, else the defaults.
    valid_extensions = getattr(settings, 'VALID_EXTENSIONS', [
        '.txt', '.asc', '.htm', '.html', '.pdf', '.doc', '.docx',
        '.odt', '.jpg', '.png', '.eml',
    ])

    # Extensionless files ('' — and '.', which splitext yields for a name
    # ending in a bare dot) are always accepted.
    if ext not in valid_extensions and ext not in ('', '.'):
        raise ValidationError(
            _('Unsupported file extension: ') + ext
        )
{ "content_hash": "8b4cc03a88c8ccf22d65b3895a7b58d0", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 84, "avg_line_length": 37.0625, "alnum_prop": 0.6306913996627319, "repo_name": "django-helpdesk/django-helpdesk", "id": "5fb80e39266c3a941a5d95ac015ee56ba7fb569e", "size": "1242", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "helpdesk/validators.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "13474" }, { "name": "HTML", "bytes": "179798" }, { "name": "JavaScript", "bytes": "44436" }, { "name": "Makefile", "bytes": "2973" }, { "name": "Python", "bytes": "539762" }, { "name": "SCSS", "bytes": "7910" }, { "name": "Shell", "bytes": "718" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter ``catalog.Name.created`` to auto-populate on row creation."""

    dependencies = [
        ('catalog', '0005_name_created'),
    ]

    operations = [
        migrations.AlterField(
            model_name='name',
            name='created',
            # auto_now_add=True: set once when the row is first saved,
            # never updated afterwards.
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
{ "content_hash": "a4e612226aea8004a159ef0f6c7a1572", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 58, "avg_line_length": 21.055555555555557, "alnum_prop": 0.5910290237467019, "repo_name": "karilint/django_local_library", "id": "8bf47a78798599bb9dcd3b7b909a442ecaa0d5a1", "size": "450", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "catalog/migrations/0006_auto_20170511_1938.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "552" }, { "name": "HTML", "bytes": "17316" }, { "name": "Python", "bytes": "57064" } ], "symlink_target": "" }
import numpy as np from functools import reduce def lw_cov(X, center=True): ''' Calculate the well-conditioned (it is invertible) and accurate (it is asymptotically more accurate than the sample covariance matrix) Ledoit-Wolf covariance estimator [1]. For X, a [p x n] matrix, where n is the number of observations, and p is the number of variables, a robust covariance estimate is returned. It analytically estimates the optimial weight \lambda^{\star} for a weigheted combination of the sample mean S and and a prior of equal variance and zero covariance: S^{\star} = \lambda^{\star} \S + (1-\lambda^{\star}) * \sigma * I See (14) of [1]. \lambda^star{\star} is based on the variation in the covariance and the difference between the prior and and the sample covariance S. [1] Olivier Ledoit and Michael Wolf. A well-conditioned estimator for large-dimensional covariance matrices. Journal of Multivariate Analysis, 88(2):365--411, February 2004. ''' X = np.atleast_2d(X) p, n = X.shape if center: X = X - np.atleast_2d(np.mean(X, 1)).T S = np.dot(X, X.T) / n prior = np.mean(np.diag(S)) * np.eye(p) # m * I b2, d2, lamb = lw_cov_base(X, S, prior) return lamb * prior + (1.-lamb) * S def lw_cov_base(X0, S, prior): ''' Calculate \lambda^{\star}, d^2 and b^2 for Ledoit-Wolf covariance estimator. Separate method for unit-testing. 
''' p, n = X0.shape # Calculate \delta^2 using Lemma 3.3: d2 = np.linalg.norm(S - prior, 'fro') ** 2 # Calculate \bar{\beta}^2 as in Lemma 3.4, but using # var(x) = E(x^2) - [E(x)]^2: XoX = X0**2 varS = np.dot(XoX, XoX.T) / n - S**2 b2 = np.sum(varS) / n # Calculate shrinkage intensity lamb = np.clip(b2/d2, 0, 1) return b2, d2, lamb def norm_kl_divergence(Sig_p, mu_p, inv_Sig_q, mu_q): ''' Calculate Kullback-Leibler divergence between two multivariate *normal* distributions analytically, specified by covariance Sig_p and mean mu_p and inverse covariance inv_Sig_q and mean mu_q: KL(P || Q) = \int_x p(x) \log(p(x) / q(x)) dx Please note that for the Q distribution the *INVERSE COVARIANCE* has to be specified. ''' Sig_p, mu_p, inv_Sig_q, mu_q = np.atleast_2d(Sig_p, mu_p, inv_Sig_q, mu_q) assert Sig_p.shape == inv_Sig_q.shape assert mu_p.size == mu_q.size == Sig_p.shape[0] A = np.dot(Sig_p, inv_Sig_q) B = np.trace(np.eye(mu_q.size) - A) C = reduce(np.dot, [(mu_p - mu_q), inv_Sig_q, (mu_p - mu_q).T]) return -.5 * (np.log(np.linalg.det(A)) + B - C).squeeze() def kl(P, Q): ''' Return the Kullack-Leibler divergence between two distributions P and Q using norm_kl_divergence(). Please note that the inversion of the covariance matrix of Q (\Sigma_Q) might cause numerically unstable results although lw_cov() is used to estimate \Sigma_Q. P and Q are [p x n] matrices, where p is the number of variables, and n is the number of observations. The Kullback-Leibler divergence is an asymmetric distance measure between two probability measures P and Q, measuring the extra information needed to repre- sent samples from P when using a code based on Q. ''' assert P.shape[0] == Q.shape[0], \ 'Distributions do not have same number of variables' S_p, m_p = lw_cov(P), np.mean(P, 1) S_q, m_q = lw_cov(Q), np.mean(Q, 1) return norm_kl_divergence(S_p, m_p, np.linalg.pinv(S_q), m_q) def roc(scores, labels): ''' Calc (TPs, FPs) pairs for different thresholds. 
This method is used for ROC plotting and AUC-ROC calculation. Labels are encoded as 0 or 1. Ties are handled correctly. ''' scores, labels = np.asarray(scores), np.asarray(labels) assert scores.ndim == labels.ndim == 1 assert np.all(np.unique(labels) == [0, 1]) si = np.argsort(scores)[::-1] scores, labels = scores[si], labels[si] # slide threshold from above TPs = np.cumsum(labels == 1) / np.sum(labels == 1).astype(float) FPs = np.cumsum(labels != 1) / np.sum(labels != 1).astype(float) # handle equal scoress ui = np.concatenate([np.diff(scores), np.array([1])]) != 0 TPs, FPs = TPs[ui], FPs[ui] # add (0, 0) to ROC TPs = np.concatenate([[0], TPs]) FPs = np.concatenate([[0], FPs]) return (TPs, FPs) def auc(scores, labels): ''' Calculate area under curve (AUC) of the receiver operating characteristic (ROC) using roc(). Ties are handles correctly. The AUC of a classifier is equivalent to the probability that the classifier will rank a randomly chosen positive instanance higher than a randomly chosen negative instance [1]. I.e., the AUC is a value between 0 and 1, and AUC of .5 indicates random ranking performance. [1] Tom Fawcett. An introduction to ROC analysis. Pattern Recognition Letters, 27(8):861-874, 2005. ''' TPs, FPs = roc(scores, labels) return np.trapz(TPs, FPs) def auc_confidence(N, rho=.5, delta=.05): ''' Calculate the confidence interval epsilon for the AUC statistic. N is the number of instances, rho is the percentage of *positive* instances, and delta is the false postive rate: \epsilon = \sqrt{\frac{log\frac{2}{\delta}}{2\rho(1-\rho)N}} [1] Shivani Agarwal, Thore Graepel, Ralf Herbrich, and Dan Roth. A large deviation bound for the area under the ROC curve. In Advances in Neural Information Processing Systems, volume 17, pages 9-16, 2005. ''' return np.sqrt(np.log(2. / delta) / (2 * rho * (1 - rho) * N)) def mut_inf(conf_mat): ''' Calculate the discrete mutual information from conf_mat. Returns the mutual information in bits. 
''' pxy = np.array(conf_mat, float) assert (pxy >= 0).all(), 'Cannot handle marginal probabilities P_{XY} < 0' pxy /= np.sum(pxy) pxs = np.sum(pxy, axis=1) pys = np.sum(pxy, axis=0) bits = 0 for x in range(pxy.shape[0]): for y in range(pxy.shape[1]): if pxy[x, y] == 0 or pxs[x] == 0 or pys[y] == 0: continue bits += pxy[x, y] * np.log2(pxy[x, y]/(pxs[x] * pys[y])) return bits def _sort(x): ''' Sort data and also return a reverse index ''' n = len(x) o = np.argsort(x) original_order = np.zeros(n, dtype=int) original_order[o] = np.arange(n) return (np.asarray(x)[o], original_order) def bonferroni(ps): ''' Adjust p-values from multiple comparisons using Bonferroni correction. Parameters ---------- ps : list of floats List of p values Outputs ------- ps : list of floats The corrected p-values ''' n = len(ps) return np.clip(np.asarray(ps) * n, 0, 1) def bonferroni_holm(ps): ''' Adjust p-values from multiple comparisons using Bonferroni-Holm correction. Parameters ---------- ps : list of floats List of p values Outputs ------- ps : list of floats The corrected p-values ''' n = len(ps) ps, original_order = _sort(ps) adj_ps = np.asarray(ps) * (n - np.arange(n)) return np.clip(adj_ps[original_order], 0, 1) def benjamini_hochberg(ps): ''' Adjust p-values from multiple comparisons using Benjamini-Hochberg correction. Parameters ---------- ps : list of floats List of p values Outputs ------- ps : list of floats The corrected p-values ''' n = len(ps) ps, original_order = _sort(ps) adj_ps = (np.asarray(ps) * n) / (np.arange(n) + 1.0) return np.clip(adj_ps[original_order], 0, 1)
{ "content_hash": "b8a5a597ef92424c8f1df2978dd9c541", "timestamp": "", "source": "github", "line_count": 234, "max_line_length": 80, "avg_line_length": 33.01282051282051, "alnum_prop": 0.6194174757281553, "repo_name": "wmvanvliet/psychic", "id": "62782c52880d06d58f7f2b07349999c3145ad4d5", "size": "7725", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "psychic/stat.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "136" }, { "name": "Python", "bytes": "421723" } ], "symlink_target": "" }
from setuptools import find_packages, setup

# Packaging metadata for the pycharts client of the YCharts REST API.
setup(
    name='pycharts',
    version='0.1.5',
    url='https://github.com/ycharts/pycharts',
    license='MIT',
    description='Client for the YCharts API',
    author='YCharts Engineering',
    author_email='support@ycharts.com',
    keywords='development ycharts api rest restful',
    packages=find_packages(),
    install_requires=[],  # no runtime dependencies beyond the stdlib
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
{ "content_hash": "ad4ffa34a47c43b3f232bdf1fe59f984", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 54, "avg_line_length": 33.55555555555556, "alnum_prop": 0.6158940397350994, "repo_name": "ycharts/pycharts", "id": "894a307cdb8e5532225d54aa1ba820b3ac6d2cf3", "size": "906", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "28723" } ], "symlink_target": "" }
"""Agents following static paths """ __author__ = "Liyan Chen" __copyright__ = "Copyright (c) 2017 Malmactor" __license__ = "MIT" import numpy as np import time def static_agent(path, simulation, render, config=None): init_pos = np.array([0, 2, 0]) if config is None or "init_pos" not in config else config["init_pos"] simulation.mario.state[:, 0] = init_pos for step in path: simulation.advance_frame(step) renderables = simulation.get_renderable() render.render(renderables) if config and config["sec_per_frame"]: time.sleep(config["sec_per_frame"])
{ "content_hash": "fdad90ac33a9600bb44504bdb721d703", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 104, "avg_line_length": 26.73913043478261, "alnum_prop": 0.6471544715447154, "repo_name": "Malmactor/malrio", "id": "ff824dfe822fe2e752acbb447cf1234180b32cae", "size": "615", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/Agent/static_path_agent.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "9771" }, { "name": "Python", "bytes": "113423" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.core.urlresolvers import reverse
from django.views.generic import TemplateView, CreateView
from django.views.generic import View

from braces.views import (
    AjaxResponseMixin,
    JSONResponseMixin,
    LoginRequiredMixin,
    StaffuserRequiredMixin,
)

from base.view_utils import BaseMixin
from .forms import DocumentForm
from .models import Document


class HomeView(BaseMixin, TemplateView):
    """Public landing page."""

    template_name = 'example/home.html'


class DashView(
        LoginRequiredMixin, StaffuserRequiredMixin, BaseMixin, TemplateView):
    """Staff-only dashboard listing every uploaded document."""

    template_name = 'dash/dash.html'

    def get_context_data(self, **kwargs):
        # Expose all documents, ordered by file name, to the template.
        context = super().get_context_data(**kwargs)
        context.update(document_list=Document.objects.order_by('file'))
        return context


class SettingsView(
        LoginRequiredMixin, StaffuserRequiredMixin, BaseMixin, TemplateView):
    """Placeholder settings page; exists so there is somewhere to log in
    (otherwise it duplicates the home/dash behaviour)."""

    template_name = 'dash/settings.html'


class FileDropDemoView(
        LoginRequiredMixin, StaffuserRequiredMixin, BaseMixin, CreateView):
    """Staff-only demo: upload a Document via a form and list existing ones."""

    form_class = DocumentForm
    model = Document
    template_name = 'dash/filedrop_demo.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update(document_list=Document.objects.order_by('file'))
        return context

    def get_success_url(self):
        # Redirect back to this demo page after a successful upload.
        return reverse('filedrop.demo')


class AjaxFileUploadView(
        LoginRequiredMixin, StaffuserRequiredMixin,
        JSONResponseMixin, AjaxResponseMixin, View):
    """
    View for uploading files via AJAX.
    """

    def post_ajax(self, request, *args, **kwargs):
        # Direct indexing: a POST without a 'file' part raises KeyError.
        uploaded_file = request.FILES['file']
        document = Document.objects.create(file=uploaded_file)
        response_dict = {
            'message': 'File uploaded successfully!',
            'file_path': document.file.name,
            'document_id': document.id
        }
        return self.render_json_response(response_dict, status=200)
{ "content_hash": "5c5222278137ec1dd14346470fd42f68", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 77, "avg_line_length": 27.8, "alnum_prop": 0.6940047961630695, "repo_name": "pkimber/base", "id": "7b24cc10fe4c4835b540f9fe05aa614097a3a7fe", "size": "2111", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "example_base/views.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "19039" }, { "name": "HTML", "bytes": "34371" }, { "name": "JavaScript", "bytes": "66591" }, { "name": "Python", "bytes": "57404" }, { "name": "Shell", "bytes": "386" } ], "symlink_target": "" }
from random import choice, randint
import math


class AbstractNameGenerator(object):
    """Draws a random (first, last) name pair from supplied name lists."""

    def nameGen(self, firstNames, lastNames):
        """Return ``{'first_name': ..., 'last_name': ...}`` with one name
        drawn uniformly at random from each (non-empty) list, lower-cased.

        Fixes: ``random.choice`` replaces the previous
        ``int(math.floor(randint(0, len(xs) - 1)))`` construction --
        ``randint`` already returns an int, so the ``floor``/``int`` wrapping
        was redundant, and the swapped ``names1``/``names2`` aliases were
        confusing.
        """
        return {
            'first_name': choice(firstNames).lower(),
            'last_name': choice(lastNames).lower(),
        }
{ "content_hash": "b2e72621aec2f1af5143721328bcf504", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 70, "avg_line_length": 28.05263157894737, "alnum_prop": 0.6341463414634146, "repo_name": "kittolau/selepy", "id": "5b4a1b29c22db5db7f416f7d056e5ee92d135f47", "size": "533", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web_helper/name_generator/abstract_name_generator.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "125250" }, { "name": "Shell", "bytes": "138" } ], "symlink_target": "" }
import sys
import random
from time import sleep
import logging
from google import pygoogle
from multiprocessing import Process, Value, Manager, Array
from ctypes import c_char, c_char_p
import subprocess
import json

# NOTE(review): Python 2 code (print statement and xrange below).

# Shared character buffer holding a JSON-encoded list of result strings;
# written by both the main loop and the worker processes spawned below.
MAX_OUTPUT = 100 * 1024
resultStr = Array(c_char, MAX_OUTPUT);


def clear_output():
    # Reset the shared result list to an empty JSON array.
    resultStr.value = json.dumps([])


def sanitize_output(string):
    # Escape/strip characters meaningful to the output format:
    # '{', '}' and '|' delimit title/action pairs; newlines break lines.
    string = string.replace("{", "\{")
    string = string.replace("}", "\}")
    string = string.replace("|", "\|")
    string = string.replace("\n", " ")
    return string


def create_result(title, action):
    # Encode one menu entry as "{title |action }".
    return "{" + title + " |" + action + " }"


def append_output(title, action):
    # Append a (title, action) entry to the shared result list, keeping the
    # last two entries (the default options) at the bottom.
    title = sanitize_output(title)
    action = sanitize_output(action)
    results = json.loads(resultStr.value)
    if len(results) < 2:
        results.append(create_result(title, action))
    else:
        # ignore the bottom two default options
        results.insert(-2, create_result(title, action))
    resultStr.value = json.dumps(results)


def prepend_output(title, action):
    # Insert a (title, action) entry at the top of the shared result list.
    title = sanitize_output(title)
    action = sanitize_output(action)
    results = json.loads(resultStr.value)
    results = [create_result(title, action)] + results
    resultStr.value = json.dumps(results)


def update_output():
    # Flush the current result list to stdout for the consumer to render.
    results = json.loads(resultStr.value)
    print "".join(results)
    sys.stdout.flush()


google_thr = None
def google(query):
    # Worker process: query Google and append the first hit as an entry.
    # NOTE(review): reads the global ``userInput`` instead of the ``query``
    # argument -- equivalent at spawn time, but confirm this is intended.
    sleep(.5) # so we aren't querying EVERYTHING we type
    g = pygoogle(userInput, log_level=logging.CRITICAL)
    g.pages = 1
    out = g.get_urls()
    if (len(out) >= 1):
        append_output(out[0], "xdg-open " + out[0])
        update_output()


find_thr = None
def find(query):
    # Worker process: search /home for files matching ``query`` via find(1).
    sleep(.5) # Don't be too aggressive...
    find_out = str(subprocess.check_output(["find", "/home", "-name", query]))
    # split() on newlines leaves a trailing empty element; drop it.
    find_array = find_out.split("\n")[:-1]
    if (len(find_array) == 0):
        return
    # Offer at most five matches: text files open in vim, anything else
    # opens a shell in the containing directory.
    for i in xrange(min(5, len(find_array))):
        append_output(str(find_array[i]),"urxvt -e bash -c 'if [[ $(file "+find_array[i]+" | grep text) != \"\" ]]; then vim "+find_array[i]+"; else cd $(dirname "+find_array[i]+"); bash; fi;'");
    update_output()


def get_process_output(process, formatting, action):
    # Run ``process`` and splice its output into the title/action templates;
    # a "%s" placeholder is replaced with the output, otherwise the template
    # string is used verbatim.
    process_out = str(subprocess.check_output(process))
    if "%s" in formatting:
        out_str = formatting % (process_out)
    else:
        out_str = formatting
    if "%s" in action:
        out_action = action % (process_out)
    else:
        out_action = action
    return (out_str, out_action)


# Keyword shortcuts: input prefix -> callable returning a (title, action)
# pair that is promoted to the top of the result list.
special = {
    "t": (lambda x: ("terminal","urxvt")),
    "ch": (lambda x: ("chromium","chromium")),
    "vi": (lambda x: ("vim","urxvt -e vim")),
    "bat": (lambda x: get_process_output("acpi", "%s", ""))
};

# Main loop: read one query per line from stdin, rebuild the candidate list
# and spawn background lookups.
while 1:
    userInput = sys.stdin.readline()
    userInput = userInput[:-1]  # strip the trailing newline

    # Clear results
    clear_output()

    # Kill previous worker threads
    if google_thr != None:
        google_thr.terminate()
    if find_thr != None:
        find_thr.terminate()

    # We don't handle empty strings
    if userInput == '':
        update_output()
        continue

    # Could be a command...
    append_output("execute '"+userInput+"'", userInput);

    # Could be bash...
    append_output("run '"+userInput+"' in a shell", "urxvt -e bash -c '"+userInput+" && bash'");

    # Scan for keywords
    for keyword in special:
        if userInput[0:len(keyword)] == keyword:
            out = special[keyword](userInput)
            if out != None:
                prepend_output(*out);

    # Is this python?
    # NOTE(review): eval() on raw user input -- acceptable for a personal
    # launcher reading from its own stdin, but it executes arbitrary
    # expressions.
    try:
        out = eval(userInput)
        if (type(out) != str and str(out)[0] == '<'):
            pass # We don't want gibberish type stuff
        else:
            prepend_output("python: "+str(out), "urxvt -e python2.7 -i -c 'print "+userInput+"'")
    except Exception as e:
        pass

    # Spawn worker threads
    google_thr = Process(target=google, args=(userInput,))
    google_thr.start()
    find_thr = Process(target=find, args=(userInput,))
    find_thr.start()

    update_output()
{ "content_hash": "d5899428bc470612c62259a0c2a69a50", "timestamp": "", "source": "github", "line_count": 137, "max_line_length": 191, "avg_line_length": 27.83211678832117, "alnum_prop": 0.6435877261998426, "repo_name": "lygaret/dotfiles", "id": "6f8e4d373a94e9e392405cfd4dac847e7184b46b", "size": "3835", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lighthouse./cmd.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "17749" }, { "name": "Ruby", "bytes": "22" }, { "name": "Shell", "bytes": "12947" }, { "name": "VimL", "bytes": "13878" } ], "symlink_target": "" }
""" CheckiOReferee is a base referee for checking you code. arguments: tests -- the dict contains tests in the specific structure. You can find an example in tests.py. cover_code -- is a wrapper for the user function and additional operations before give data in the user function. You can use some predefined codes from checkio.referee.cover_codes checker -- is replacement for the default checking of an user function result. If given, then instead simple "==" will be using the checker function which return tuple with result (false or true) and some additional info (some message). You can use some predefined codes from checkio.referee.checkers add_allowed_modules -- additional module which will be allowed for your task. add_close_builtins -- some closed builtin words, as example, if you want, you can close "eval" remove_allowed_modules -- close standard library modules, as example "math" checkio.referee.checkers checkers.float_comparison -- Checking function fabric for check result with float numbers. Syntax: checkers.float_comparison(digits) -- where "digits" is a quantity of significant digits after coma. checkio.referee.cover_codes cover_codes.unwrap_args -- Your "input" from test can be given as a list. if you want unwrap this before user function calling, then using this function. For example: if your test's input is [2, 2] and you use this cover_code, then user function will be called as checkio(2, 2) cover_codes.unwrap_kwargs -- the same as unwrap_kwargs, but unwrap dict. """ from checkio.signals import ON_CONNECT from checkio import api from checkio.referees.io import CheckiOReferee from checkio.referees import cover_codes from checkio.referees import checkers from tests import TESTS api.add_listener( ON_CONNECT, CheckiOReferee( tests=TESTS, function_name="find_word" # checker=None, # checkers.float.comparison(2) # add_allowed_modules=[], # add_close_builtins=[], # remove_allowed_modules=[] ).on_ready)
{ "content_hash": "5490b831f6ee6b3bf581219110df4bcc", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 102, "avg_line_length": 47.15217391304348, "alnum_prop": 0.7030889810972798, "repo_name": "Bryukh-Checkio-Tasks/checkio-mission-gate-puzzle", "id": "913addbd645b260569eb0b6bf265167fcb082fed", "size": "2169", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "verification/referee.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3722" }, { "name": "JavaScript", "bytes": "4879" }, { "name": "Python", "bytes": "12172" } ], "symlink_target": "" }
from m2core.m2core import M2Core, logger from m2core import bases from m2core import data_schemes from m2core import db from m2core import utils from m2core import common
{ "content_hash": "f493da0e71b5dd8de720a4d2570fbd47", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 40, "avg_line_length": 28.5, "alnum_prop": 0.8362573099415205, "repo_name": "mdutkin/m2core", "id": "505f349465a4be2fd4d0595f3bfbdddd77b44a26", "size": "171", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "m2core/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "197750" } ], "symlink_target": "" }
"""Auto-generated file, do not edit by hand. EE metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_EE = PhoneMetadata(id='EE', country_code=None, international_prefix=None, general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}'), toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), emergency=PhoneNumberDesc(national_number_pattern='11[02]', possible_number_pattern='\\d{3}', example_number='112'), short_code=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}', example_number='116'), standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), short_data=True)
{ "content_hash": "31dd31312ba7bbec3e0d792961dc32c9", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 122, "avg_line_length": 80.75, "alnum_prop": 0.7492260061919505, "repo_name": "agentr13/python-phonenumbers", "id": "635456e8aa44702ad63f56ddcbeaa367a65defe7", "size": "969", "binary": false, "copies": "8", "ref": "refs/heads/dev", "path": "python/phonenumbers/shortdata/region_EE.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Protocol Buffer", "bytes": "20454" }, { "name": "Python", "bytes": "19850381" } ], "symlink_target": "" }
import os
import sys

from haproxy_stats import HaproxyStats
from vrouter.loadbalancer.ttypes import \
    LoadbalancerStats, UveLoadbalancerStats, UveLoadbalancerTrace

# Fallback directory containing one sub-directory per load-balancer pool.
LB_BASE_DIR = '/var/lib/contrail/loadbalancer/'


class LoadbalancerStatsUVE(object):
    """Collects haproxy statistics per pool and publishes them as UVEs."""

    def __init__(self):
        self.driver = HaproxyStats()
        if not self.driver.lbaas_dir:
            self.driver.lbaas_dir = LB_BASE_DIR
        # Remember which pool directories existed at start-up so that UVEs
        # for pools that have since vanished can be marked deleted.
        try:
            self.old_pool_uuids = os.listdir(self.driver.lbaas_dir)
        except OSError:
            self.old_pool_uuids = []

    def _uve_get_stats(self, stats):
        """Convert one stats dict into a LoadbalancerStats object.

        Values in ``stats`` are strings; only keys whose values are all
        digits are copied (as ints) onto matching attributes.
        """
        obj_stats = LoadbalancerStats()
        obj_stats.obj_name = stats['name']
        obj_stats.uuid = stats['name']
        obj_stats.status = stats['status']
        obj_stats.vrouter = stats['vrouter']
        for attr in dir(obj_stats):
            if attr in stats and stats[attr].isdigit():
                setattr(obj_stats, attr, int(stats[attr]))
        return obj_stats

    def _uve_get_member_stats(self, stats):
        """Convert a list of member stats dicts into LoadbalancerStats.

        NOTE(review): not referenced within this module chunk; the member
        loop in _send_loadbalancer_uve uses _uve_get_stats per item instead.
        """
        member_stats = []
        for stat in stats:
            obj_stats = LoadbalancerStats()
            obj_stats.obj_name = stat['name']
            obj_stats.uuid = stat['name']
            obj_stats.status = stat['status']
            obj_stats.vrouter = stat['vrouter']
            for attr in dir(obj_stats):
                if attr in stat and stat[attr].isdigit():
                    setattr(obj_stats, attr, int(stat[attr]))
            member_stats.append(obj_stats)
        return member_stats

    def _send_loadbalancer_uve(self):
        """Publish one UVE per pool; mark vanished/empty pools as deleted."""
        try:
            pool_uuids = os.listdir(self.driver.lbaas_dir)
        except OSError:
            return

        # delete stale uves
        for pool_uuid in self.old_pool_uuids:
            if pool_uuid not in pool_uuids:
                uve_lb = UveLoadbalancerStats(name=pool_uuid, deleted=True)
                uve_lb.listener = {}
                uve_lb.pool = {}
                uve_lb.member = {}
                uve_trace = UveLoadbalancerTrace(data=uve_lb)
                uve_trace.send()
        self.old_pool_uuids = pool_uuids

        # send stats
        for pool_uuid in pool_uuids:
            lb_stats = self.driver.get_stats(pool_uuid)
            if not 'listener' in lb_stats or not len(lb_stats['listener']):
                # No listener data at all: treat the pool as gone.
                uve_lb = UveLoadbalancerStats(name=pool_uuid, deleted=True)
                uve_lb.listener = {}
                uve_lb.pool = {}
                uve_lb.member = {}
                uve_trace = UveLoadbalancerTrace(data=uve_lb)
                uve_trace.send()
                continue

            uve_lb = UveLoadbalancerStats()
            uve_lb.name = pool_uuid
            uve_lb.listener = {}
            uve_lb.pool = {}
            uve_lb.member = {}

            # Index each listener/pool/member stats entry by its name.
            count = 0
            total_items = len(lb_stats['listener'])
            while (total_items > count):
                lb_stat = lb_stats['listener'][count]
                name = lb_stat['name']
                value = self._uve_get_stats(lb_stat)
                uve_lb.listener[name] = value
                count = count + 1

            count = 0
            total_items = len(lb_stats['pool'])
            while (total_items > count):
                lb_stat = lb_stats['pool'][count]
                name = lb_stat['name']
                value = self._uve_get_stats(lb_stat)
                uve_lb.pool[name] = value
                count = count + 1

            count = 0
            total_items = len(lb_stats['member'])
            while (total_items > count):
                lb_stat = lb_stats['member'][count]
                name = lb_stat['name']
                value = self._uve_get_stats(lb_stat)
                uve_lb.member[name] = value
                count = count + 1

            uve_trace = UveLoadbalancerTrace(data=uve_lb)
            uve_trace.send()

    def send_loadbalancer_stats(self):
        """Entry point: publish stats, logging (never raising) failures."""
        try:
            self._send_loadbalancer_uve()
        except Exception as e:
            sys.stderr.write('LB stats failure ' + str(e) + '\n')
{ "content_hash": "243f19c126a28fac1522ad94cd1cb469", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 75, "avg_line_length": 36.232142857142854, "alnum_prop": 0.5241498275012322, "repo_name": "tcpcloud/contrail-controller", "id": "9a7e136a14f1e9c9eb02df8314dd9698a09bef36", "size": "4058", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/nodemgr/vrouter_nodemgr/loadbalancer_stats.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "88309" }, { "name": "C++", "bytes": "20774234" }, { "name": "CSS", "bytes": "531" }, { "name": "GDB", "bytes": "44610" }, { "name": "Groff", "bytes": "41295" }, { "name": "HTML", "bytes": "519766" }, { "name": "Java", "bytes": "171966" }, { "name": "LLVM", "bytes": "2937" }, { "name": "Lua", "bytes": "19459" }, { "name": "Makefile", "bytes": "12449" }, { "name": "Protocol Buffer", "bytes": "6129" }, { "name": "Python", "bytes": "5701059" }, { "name": "Shell", "bytes": "52859" }, { "name": "Thrift", "bytes": "8382" }, { "name": "Yacc", "bytes": "7737" } ], "symlink_target": "" }
import asyncio
import contextlib
import logging
import os
import time
from typing import List

import torch

logger = logging.getLogger(__name__)

# When set, CUDA events are created with timing enabled and per-stream
# elapsed times are logged after each ``completed`` block.
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))


@contextlib.asynccontextmanager
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams.

    :param trace_name: label used in log messages.
    :param name: label used in log messages.
    :param sleep_interval: seconds between event polls while waiting.
    :param streams: streams to wait on; ``None`` entries (or an empty/None
        list) fall back to the stream current on entry.
    """
    if not torch.cuda.is_available():
        # Nothing to synchronise without CUDA; degrade to a no-op.
        yield
        return

    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        streams = [s if s else stream_before_context_switch for s in streams]

    # One end-of-work event per stream; timing only in debug mode.
    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]

    if DEBUG_COMPLETED_TIME:
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)
        cpu_start = time.monotonic()

    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        # Record the end events only after the body has queued its work.
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)

        grad_enabled_after = torch.is_grad_enabled()

        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'

        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        # Poll the events cooperatively so other coroutines can run while
        # the queued GPU work drains.
        with torch.cuda.stream(stream_before_context_switch):
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )

        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)


@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
                     trace_name='concurrent',
                     name='stream'):
    """Run code concurrently in different streams.

    :param streamqueue: asyncio.Queue instance.

    Queue tasks define the pool of streams used for concurrent execution.
    The borrowed stream is always returned to the queue, even on error.
    """
    if not torch.cuda.is_available():
        yield
        return

    initial_stream = torch.cuda.current_stream()

    with torch.cuda.stream(initial_stream):
        # Borrow a stream from the pool for the duration of the block.
        stream = await streamqueue.get()
        assert isinstance(stream, torch.cuda.Stream)

        try:
            with torch.cuda.stream(stream):
                logger.debug('%s %s is starting, stream: %s', trace_name,
                             name, stream)
                yield
                current = torch.cuda.current_stream()
                assert current == stream
                logger.debug('%s %s has finished, stream: %s', trace_name,
                             name, stream)
        finally:
            # Return the stream so other coroutines can reuse it.
            streamqueue.task_done()
            streamqueue.put_nowait(stream)
{ "content_hash": "b80e1c3b9be3e9b12cfb1a6c0caec6ce", "timestamp": "", "source": "github", "line_count": 121, "max_line_length": 79, "avg_line_length": 33.69421487603306, "alnum_prop": 0.5761589403973509, "repo_name": "open-mmlab/mmdetection", "id": "fa12bfcaff1e781b0a8cc7d7c8b839c2f2955a05", "size": "4125", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mmdet/utils/contextmanagers.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2540" }, { "name": "Python", "bytes": "4811377" }, { "name": "Shell", "bytes": "47911" } ], "symlink_target": "" }
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import networkx as nx
import itertools
import toolz
from functools import reduce
from operator import mul


def product(iterable):
    """Return the product of all items in ``iterable`` (1 when empty)."""
    result = 1
    for item in iterable:
        result = mul(result, item)
    return result


def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    first, second = itertools.tee(iterable)
    next(second, None)
    return zip(first, second)


def plot(x, y, quantity, visible, yscale="linear"):
    """
    Plot `y` as function of `x` where `y` has quantity `quantity` and `x` is
    frequency :math:`f`.

    :param x: Array of values for the `x`-axis.
    :param y: Array of values for the `y`-axis.
    :param quantity: Quantity
    :param visible: Array of booleans.
    :returns: Figure.
    :type: :class:`matplotlib.figure.Figure`

    """
    # Keep only the points flagged as visible.
    mask = ~visible.astype(bool)
    frequencies = ma.masked_array(x, mask=mask).compressed()
    # Fall back to a generic label for unknown quantities.
    label = ATTRIBUTES.get(quantity, "Unknown quantity")

    fig = plt.figure()
    ax = fig.add_subplot(111)
    for series in y:
        values = ma.masked_array(series, mask=mask).compressed()
        ax.scatter(frequencies, values)
    ax.set_xscale("log")
    ax.set_yscale(yscale)
    ax.set_xlabel("$f$ in Hz")
    ax.set_ylabel(label)
    ax.grid()
    return fig


# Axis labels keyed by quantity name.
ATTRIBUTES = {
    "pressure_level": "$L_p$ in dB",
    "velocity_level": "$L_v$ in dB",
    "power_level": "$L_P$ in dB",
    "mass": "$m$ in kg",
    "impedance": "$Z$ in ...",
    "resistance": "$R$ in ...",
    "reactance": "$X$ in ...",
    "resistance_point_average": "$R$ in ...",
    "mobility": "$Y$ in ...",
    "modal_density": "$n$ in ...",
    "average_frequency_spacing": "$\Delta f$ in Hz",
    "soundspeed_group": "$c_{g}$ in m/s",
    "soundspeed_phase": "$c_{\phi}$ in m/s",
    "clf": "$\eta$ in ...",
    "input_power": "$P$ in W",
    "loss_factor": "$\eta$ in $\mathrm{rad}^{-1}$",
    "wavenumber": "$k$ in rad/m",
    "power": "$P$ in W",
    "conductance": "$G$ in ...",
    "susceptance": "$B$ in ...",
    "sound_reduction_index": "$R$ in ...",
    "tau": "$\tau$ in ...",
}


# edges = ((obj.name, getattr(obj, b).name) for obj in getattr(system, b+'s'))
# G.add_edges_from(edges)
# except
# AttributeError:
#     pass
# try:
#     edges = ((obj.name, getattr(obj, b).name) for obj in getattr(system, b+'s'))
#     G.add_edges_from(edges)
# except AttributeError:
#     pass


def graph_couplings(system):
    """Graph with subsystems as nodes and couplings as edges.

    :param system: SEA system providing ``subsystems`` and ``couplings``.
    :returns: :class:`networkx.DiGraph` with one node per subsystem and one
        edge (tagged with the coupling name) per coupling.
    """
    G = nx.DiGraph()
    nodes = (obj.name for obj in system.subsystems)
    edges = (
        (obj.subsystem_from.name, obj.subsystem_to.name, {"name": obj.name})
        for obj in system.couplings
    )
    G.add_nodes_from(nodes)
    G.add_edges_from(edges)
    return G


class Path(object):
    """Path between two subsystems.

    .. warning:: If changes are made in the actual paths in the system,
       then this object will yield invalid results!
    """

    def __init__(self, system, path):
        # The path is a list of names of subsystems and couplings.
        self._path = path
        # Reference to the owning system, used to resolve names to objects.
        self._system = system

    def __repr__(self):
        return "Path({})".format(self._path)

    def __str__(self):
        return str(self._path)

    def __iter__(self):
        yield from self._path

    @property
    def subsystems(self):
        """Subsystems in path.
        """
        for name in self._path:
            obj = self._system.get_object(name)
            if obj.SORT == "Subsystem":
                yield obj

    @property
    def couplings(self):
        """Couplings in path.
        """
        for name in self._path:
            obj = self._system.get_object(name)
            if obj.SORT == "Coupling":
                yield obj

    @property
    def energy_ratio(self):
        """Energy ratio :math:`\\frac{E_n}{E_1}`.

        See Craik, equation 6.43.
        """
        # Product of coupling loss factors over product of total loss
        # factors along the path.
        clf = (obj.clf for obj in self.couplings)
        tlf = (obj.tlf for obj in self.subsystems)
        return product(clf) / product(tlf)

    @property
    def level_difference(self):
        """Attenuation along path.

        .. math:: 10 \\log_{10}{\\frac{E_1}{E_n}}

        See Craik, equation 6.44
        """
        return -10.0 * np.log10(self.energy_ratio)  # Note the minus sign!

    def energy_due_to_excitation(self, excitation=None):
        """Energy in subsystem due to excitation.

        :param excitation: Excitation.

        Valid arguments are:

        * 'None', in which case the total power input to the first
          subsystem is considered;
        * an iterable with names or excitation objects;
        * a name of the excitation;
        * an excitation object.

        :returns: Energy in a subsystem.
        """
        angular = self._system.frequency.angular
        subsystem = toolz.first(self.subsystems)

        if excitation is None:
            # Total power into the first subsystem of the path.
            power = subsystem.power_input
        elif toolz.isiterable(excitation):
            # Sum the power of every listed excitation; each must act on
            # the first subsystem of the path.
            excitations = excitation
            power = np.zeros(len(self._system.frequency))
            for excitation in excitations:
                excitation = self._system.get_object(excitation)
                if excitation.subsystem == subsystem:
                    power += excitation.power
                else:
                    raise ValueError(
                        "Invalid excitation. The excitation {} is not connected to the first subsystem of this path, {}.".format(
                            excitation.name, subsystem.name
                        )
                    )
        else:
            excitation = self._system.get_object(excitation)
            if excitation.subsystem == subsystem:
                power = excitation.power
            else:
                raise ValueError(
                    "Invalid excitation. The excitation {} is not connected to the first subsystem of this path, {}.".format(
                        excitation.name, subsystem.name
                    )
                )
        # Craik eq. 6.47: E_n = (P / omega) * (E_n / E_1).
        return power / angular * self.energy_ratio


class PathAnalysis(object):
    """Path analysis.
    """

    def __init__(self, system):
        self._system = system

    def graph(self, objects=None):
        """Draw a graph of types specified in `objects`.

        .. note:: All objects are treated as nodes.

        """
        system = self._system
        G = nx.DiGraph()

        # Every requested object sort contributes its objects as nodes.
        for sort in objects:
            nodes = (obj.name for obj in getattr(system, sort))  # +'s'))
            G.add_nodes_from(nodes)

        # Add edges for each pair of sorts that are related in the model.
        if "components" in objects and "subsystems" in objects:
            edges = ((obj.name, obj.component.name) for obj in system.subsystems)
            G.add_edges_from(edges)
        if "components" in objects and "materials" in objects:
            edges = ((obj.name, obj.component.name) for obj in system.materials)
            G.add_edges_from(edges)
        if "components" in objects and "junctions" in objects:
            edges = ((obj.name, obj.component.name) for obj in system.junctions)
            G.add_edges_from(edges)
        if "subsystems" in objects and "excitations" in objects:
            edges = ((obj.name, obj.subsystem.name) for obj in system.excitations)
            G.add_edges_from(edges)
        if "junctions" in objects and "couplings" in objects:
            edges = ((obj.name, obj.junction.name) for obj in system.couplings)
            G.add_edges_from(edges)
        if "subsystems" in objects and "couplings" in objects:
            # Couplings appear as intermediate nodes between the two
            # subsystems they connect.
            for coupling in system.couplings:
                nx.classes.function.add_path(
                    G,
                    (
                        coupling.subsystem_from.name,
                        coupling.name,
                        coupling.subsystem_to.name,
                    ),
                )
            # edges = ((obj.subsystem_from.name, obj.subsystem_to.name) for obj in system.couplings)
            # G.add_edges_from(edges)
            # edges = ((obj.name, obj.subsystem_from.name) for obj in system.couplings)
            # G.add_edges_from(edges)
            # edges = ((obj.name, obj.subsystem_to.name) for obj in system.couplings)
            # G.add_edges_from(edges)
        return G

    def paths(self, subsystem_from, subsystem_to):
        """Determine all paths between specified subsystems.
        """
        subsystem_from = self._system.get_object(subsystem_from)
        subsystem_to = self._system.get_object(subsystem_to)
        G = self.graph(objects=["subsystems", "couplings"])
        yield from (
            Path(self._system, path)
            for path in nx.all_simple_paths(G, subsystem_from.name, subsystem_to.name)
        )

    def has_path(self, subsystem_from, subsystem_to):
        """Determine whether there is a connection between two subsystems.

        """
        subsystem_from = self._system.get_object(subsystem_from)
        subsystem_to = self._system.get_object(subsystem_to)
        G = self.graph(objects=["subsystems", "couplings"])
        return nx.has_path(G, subsystem_from.name, subsystem_to.name)

    # def energy_due_to_excitation(self, subsystem, excitation):
    #     """Energy in `subsystem` due to `excitation`
    #     See Craik, equation 6.47, page 163.
    #     """
    #     angular = self._system.frequency.angular
    #     power = self._system.get_object(excitation).power
    #     energy = power / angular *
{ "content_hash": "31d706c7cb9fd9608c227ac2784899dd", "timestamp": "", "source": "github", "line_count": 314, "max_line_length": 129, "avg_line_length": 30.471337579617835, "alnum_prop": 0.5605142140468228, "repo_name": "FRidh/seapy", "id": "15055ef81a11a4cf28bb543c427ebfc6940a96ff", "size": "9568", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "seapy/tools.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "471" }, { "name": "Nix", "bytes": "1340" }, { "name": "Python", "bytes": "162191" } ], "symlink_target": "" }
import sip
sip.setapi('QVariant',2)
sip.setapi('QString',2)

from PyQt4 import QtCore,QtGui

from klusta_process_manager.config import get_klusta_path


class FolderView(QtGui.QWidget):
    """Two-pane browser: a table of experiment folders on the left and a
    file tree showing the selected folder's contents on the right.

    Double-clicking a ``.kwik`` file launches klustaviewa in a detached
    process; any other file is opened with the desktop's default handler.
    """

    def __init__(self,model,parent=None):
        """Build the widget around *model* (the experiment list model).

        *model* must provide ``pathLocal_from_index`` (see
        ``on_selection_changed``) in addition to the usual Qt model API.
        """
        super(FolderView,self).__init__(parent)

        #Table (list of experiment)
        self.table=QtGui.QTableView(self)
        self.table.horizontalHeader().setVisible(False)
        self.table.verticalHeader().setVisible(False)
        self.table.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
        self.table.setShowGrid(False)
        # The scrollbar is detached from the table and placed on the far
        # left of the layout; the table's own bar is disabled below.
        vbar=self.table.verticalScrollBar()
        self.table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.table.setModel(model)
        self.table.selectionModel().selectionChanged.connect(self.on_selection_changed)

        #ListFile (contents of one experiment folder)
        self.listFile=QtGui.QTreeView(self)
        self.listFile.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
        self.listFile.header().setStretchLastSection(True)
        self.listFile.doubleClicked.connect(self.open_selected_file)

        #FileSytemModel linked to listFile
        self.folderModel=QtGui.QFileSystemModel(self)
        # Filtered-out files are hidden entirely instead of greyed out.
        self.folderModel.setNameFilterDisables(False)

        #open klustaviewa
        self.processViewa=QtCore.QProcess()
        #dealing with the klusta environment (not perfect)
        # Rewrite PATH entries so the detached process runs inside the
        # "klusta" conda environment instead of the base environment.
        env = QtCore.QProcess.systemEnvironment()
        itemToReplace=[item for item in env if item.startswith('PATH=')]
        for item in itemToReplace:
            newitem=item.replace('/anaconda/bin:','/anaconda/envs/klusta/bin:')
            newitem=newitem.replace('/miniconda/bin:','/miniconda/envs/klusta/bin:')
            env.remove(item)
            env.append(newitem)
        env.append("CONDA_DEFAULT_ENV=klusta")
        self.processViewa.setEnvironment(env)

        #hide label/Edit
        # NOTE(review): label_hide and edit_hide are created, shown and
        # hidden but never added to a layout -- confirm whether they are
        # meant to appear in hbox below.
        self.label_hide=QtGui.QLabel('Show only:')
        self.edit_hide=QtGui.QLineEdit()
        self.edit_hide.setPlaceholderText("type 'kwik, 'pos',... and press enter")
        self.edit_hide.returnPressed.connect(self.on_enter_press)

        #Layout
        self.space=QtGui.QWidget()
        hbox=QtGui.QHBoxLayout()
        hbox.addWidget(vbar)
        hbox.addWidget(self.table)
        hbox.addWidget(self.space)
        hbox.addWidget(self.listFile)
        self.setLayout(hbox)

        #display empty space (no folder selected)
        self.listFile.hide()
        self.label_hide.hide()
        self.edit_hide.hide()
        self.space.show()

    def refresh(self):
        """Repaint the file tree."""
        self.listFile.update()

    def on_enter_press(self):
        """Apply the comma-separated extensions typed in edit_hide as
        name filters on the folder model (e.g. "kwik, pos" -> *kwik, *pos)."""
        text=self.edit_hide.text()
        filters=["*"+extension.strip() for extension in text.split(",")]
        self.edit_hide.clearFocus()
        self.folderModel.setNameFilters(filters)

    #User clicked on one folder
    def on_selection_changed(self,selected,deselected):
        """Show the contents of the last-selected experiment folder, or the
        empty placeholder when the selection is cleared."""
        if len(selected.indexes())==0:
            # Selection change delivered nothing new; fall back to the
            # table's current selection, or show the placeholder if empty.
            if len(self.table.selectedIndexes())==0:
                self.listFile.hide()
                self.label_hide.hide()
                self.edit_hide.hide()
                self.space.show()
                return
            else:
                lastIndex=self.table.selectedIndexes()[-1]
        else:
            lastIndex=selected.indexes()[-1]
        self.listFile.show()
        self.edit_hide.show()
        self.label_hide.show()
        self.space.hide()
        #Set ListFile to display folder's content
        path=lastIndex.model().pathLocal_from_index(lastIndex)
        self.folderModel.setRootPath(path)
        self.listFile.setModel(self.folderModel)
        self.listFile.setRootIndex(self.folderModel.index(path))
        self.listFile.clearSelection()

    #user changed animal
    def reset_view(self):
        """Clear both panes after the underlying model changed, and shrink
        the table to the width of its first four columns."""
        self.table.reset()
        self.table.clearSelection()
        self.folderModel.reset()
        self.listFile.hide()
        self.label_hide.hide()
        self.edit_hide.hide()
        self.listFile.clearSelection()
        self.space.show()
        self.table.resizeColumnsToContents()
        he=self.table.horizontalHeader()
        length=he.sectionSize(0)+he.sectionSize(1)+he.sectionSize(2)+he.sectionSize(3)
        self.table.setMaximumWidth(length+10)

    #double click on a file
    def open_selected_file(self,index):
        """Open the double-clicked file: .kwik files in klustaviewa,
        everything else with the desktop default application."""
        if self.folderModel.isDir(index):
            return
        path=self.folderModel.filePath(index)
        if path.endswith(".kwik"):
            self.open_klustaviewa(path)
        else:
            QtGui.QDesktopServices.openUrl(QtCore.QUrl(path))

    def open_klustaviewa(self,path):
        """Launch klustaviewa on *path* as a detached process; warn the
        user if the executable could not be started."""
        # get_klusta_path() is expected to end at the "klusta" prefix so
        # appending "viewa" yields the klustaviewa executable path.
        pathKlustaviewa=get_klusta_path()+"viewa"
        isOpen=self.processViewa.startDetached(pathKlustaviewa,[path])
        if not isOpen:
            QtGui.QMessageBox.warning(self,"error",
                    "Could not open Klustaviewa (path=%s)"%pathKlustaviewa,
                    QtGui.QMessageBox.Ok)
{ "content_hash": "5b860d3b9c6b4745ee292a6aa003379e", "timestamp": "", "source": "github", "line_count": 133, "max_line_length": 81, "avg_line_length": 32.21804511278196, "alnum_prop": 0.7458576429404901, "repo_name": "tymoreau/klusta_process_manager", "id": "855d99a5a0872012846f836655e7a73a43999f23", "size": "4289", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "klusta_process_manager/fileBrowser/folderView.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "101103" } ], "symlink_target": "" }
""" Session Management (from web.py) """ import os, time, datetime, random, base64 import os.path try: import cPickle as pickle except ImportError: import pickle try: import hashlib sha1 = hashlib.sha1 except ImportError: import sha sha1 = sha.new import utils import webapi as web __all__ = [ 'Session', 'SessionExpired', 'Store', 'DiskStore', 'DBStore', ] web.config.session_parameters = utils.storage({ 'cookie_name': 'webpy_session_id', 'cookie_domain': None, 'timeout': 86400, #24 * 60 * 60, # 24 hours in seconds 'ignore_expiry': True, 'ignore_change_ip': True, 'secret_key': 'fLjUfxqXtfNoIldA0A0J', 'expired_message': 'Session expired', 'httponly': True }) class SessionExpired(web.HTTPError): def __init__(self, message): web.HTTPError.__init__(self, '200 OK', {}, data=message) class Session(object): """Session management for web.py """ __slots__ = [ "store", "_initializer", "_last_cleanup_time", "_config", "_data", "__getitem__", "__setitem__", "__delitem__" ] def __init__(self, app, store, initializer=None): self.store = store self._initializer = initializer self._last_cleanup_time = 0 self._config = utils.storage(web.config.session_parameters) self._data = utils.threadeddict() self.__getitem__ = self._data.__getitem__ self.__setitem__ = self._data.__setitem__ self.__delitem__ = self._data.__delitem__ if app: app.add_processor(self._processor) def __getattr__(self, name): return getattr(self._data, name) def __setattr__(self, name, value): if name in self.__slots__: object.__setattr__(self, name, value) else: setattr(self._data, name, value) def __delattr__(self, name): delattr(self._data, name) def _processor(self, handler): """Application processor to setup session for every request""" self._cleanup() self._load() try: return handler() finally: self._save() def _load(self): """Load the session from the store, by the id from cookie""" cookie_name = self._config.cookie_name cookie_domain = self._config.cookie_domain httponly = self._config.httponly self.session_id 
= web.cookies().get(cookie_name) # protection against session_id tampering if self.session_id and not self._valid_session_id(self.session_id): self.session_id = None self._check_expiry() if self.session_id: d = self.store[self.session_id] self.update(d) self._validate_ip() if not self.session_id: self.session_id = self._generate_session_id() if self._initializer: if isinstance(self._initializer, dict): self.update(self._initializer) elif hasattr(self._initializer, '__call__'): self._initializer() self.ip = web.ctx.ip def _check_expiry(self): # check for expiry if self.session_id and self.session_id not in self.store: if self._config.ignore_expiry: self.session_id = None else: return self.expired() def _validate_ip(self): # check for change of IP if self.session_id and self.get('ip', None) != web.ctx.ip: if not self._config.ignore_change_ip: return self.expired() def _save(self): cookie_name = self._config.cookie_name cookie_domain = self._config.cookie_domain httponly = self._config.httponly if not self.get('_killed'): web.setcookie(cookie_name, self.session_id, domain=cookie_domain, httponly=httponly) self.store[self.session_id] = dict(self._data) else: web.setcookie(cookie_name, self.session_id, expires=-1, domain=cookie_domain, httponly=httponly) def _generate_session_id(self): """Generate a random id for session""" while True: rand = os.urandom(16) now = time.time() secret_key = self._config.secret_key session_id = sha1("%s%s%s%s" %(rand, now, utils.safestr(web.ctx.ip), secret_key)) session_id = session_id.hexdigest() if session_id not in self.store: break return session_id def _valid_session_id(self, session_id): rx = utils.re_compile('^[0-9a-fA-F]+$') return rx.match(session_id) def _cleanup(self): """Cleanup the stored sessions""" current_time = time.time() timeout = self._config.timeout if current_time - self._last_cleanup_time > timeout: self.store.cleanup(timeout) self._last_cleanup_time = current_time def expired(self): """Called when an expired session 
is atime""" self._killed = True self._save() raise SessionExpired(self._config.expired_message) def kill(self): """Kill the session, make it no longer available""" del self.store[self.session_id] self._killed = True class Store: """Base class for session stores""" def __contains__(self, key): raise NotImplementedError def __getitem__(self, key): raise NotImplementedError def __setitem__(self, key, value): raise NotImplementedError def cleanup(self, timeout): """removes all the expired sessions""" raise NotImplementedError def encode(self, session_dict): """encodes session dict as a string""" pickled = pickle.dumps(session_dict) return base64.encodestring(pickled) def decode(self, session_data): """decodes the data to get back the session dict """ pickled = base64.decodestring(session_data) return pickle.loads(pickled) class DiskStore(Store): """ Store for saving a session on disk. >>> import tempfile >>> root = tempfile.mkdtemp() >>> s = DiskStore(root) >>> s['a'] = 'foo' >>> s['a'] 'foo' >>> time.sleep(0.01) >>> s.cleanup(0.01) >>> s['a'] Traceback (most recent call last): ... KeyError: 'a' """ def __init__(self, root): # if the storage root doesn't exists, create it. 
if not os.path.exists(root): os.makedirs( os.path.abspath(root) ) self.root = root def _get_path(self, key): if os.path.sep in key: raise ValueError, "Bad key: %s" % repr(key) return os.path.join(self.root, key) def __contains__(self, key): path = self._get_path(key) return os.path.exists(path) def __getitem__(self, key): path = self._get_path(key) if os.path.exists(path): pickled = open(path).read() return self.decode(pickled) else: raise KeyError, key def __setitem__(self, key, value): path = self._get_path(key) pickled = self.encode(value) try: f = open(path, 'w') try: f.write(pickled) finally: f.close() except IOError: pass def __delitem__(self, key): path = self._get_path(key) if os.path.exists(path): os.remove(path) def cleanup(self, timeout): now = time.time() for f in os.listdir(self.root): path = self._get_path(f) atime = os.stat(path).st_atime if now - atime > timeout : os.remove(path) class DBStore(Store): """Store for saving a session in database Needs a table with the following columns: session_id CHAR(128) UNIQUE NOT NULL, atime DATETIME NOT NULL default current_timestamp, data TEXT """ def __init__(self, db, table_name): self.db = db self.table = table_name def __contains__(self, key): data = self.db.select(self.table, where="session_id=$key", vars=locals()) return bool(list(data)) def __getitem__(self, key): now = datetime.datetime.now() try: s = self.db.select(self.table, where="session_id=$key", vars=locals())[0] self.db.update(self.table, where="session_id=$key", atime=now, vars=locals()) except IndexError: raise KeyError else: return self.decode(s.data) def __setitem__(self, key, value): pickled = self.encode(value) now = datetime.datetime.now() if key in self: self.db.update(self.table, where="session_id=$key", data=pickled, vars=locals()) else: self.db.insert(self.table, False, session_id=key, data=pickled ) def __delitem__(self, key): self.db.delete(self.table, where="session_id=$key", vars=locals()) def cleanup(self, timeout): timeout = 
datetime.timedelta(timeout/(24.0*60*60)) #timedelta takes numdays as arg last_allowed_time = datetime.datetime.now() - timeout self.db.delete(self.table, where="$last_allowed_time > atime", vars=locals()) class ShelfStore: """Store for saving session using `shelve` module. import shelve store = ShelfStore(shelve.open('session.shelf')) XXX: is shelve thread-safe? """ def __init__(self, shelf): self.shelf = shelf def __contains__(self, key): return key in self.shelf def __getitem__(self, key): atime, v = self.shelf[key] self[key] = v # update atime return v def __setitem__(self, key, value): self.shelf[key] = time.time(), value def __delitem__(self, key): try: del self.shelf[key] except KeyError: pass def cleanup(self, timeout): now = time.time() for k in self.shelf.keys(): atime, v = self.shelf[k] if now - atime > timeout : del self[k] if __name__ == '__main__' : import doctest doctest.testmod()
{ "content_hash": "9d18de94b25616bc3621093568cbdab1", "timestamp": "", "source": "github", "line_count": 347, "max_line_length": 108, "avg_line_length": 29.953890489913544, "alnum_prop": 0.555416586492207, "repo_name": "ruishihan/R7-with-notes", "id": "ab75aca9a6caf62fecee7b69dd794ea661751684", "size": "10394", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/python/web/session.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "29299" }, { "name": "C++", "bytes": "28623" }, { "name": "CSS", "bytes": "1787" }, { "name": "Coq", "bytes": "1494" }, { "name": "HTML", "bytes": "5795" }, { "name": "JavaScript", "bytes": "2024757" }, { "name": "Makefile", "bytes": "2315" }, { "name": "Matlab", "bytes": "38436" }, { "name": "Python", "bytes": "491683" }, { "name": "Shell", "bytes": "2458" }, { "name": "Tcl", "bytes": "12977" }, { "name": "Verilog", "bytes": "38673" } ], "symlink_target": "" }
from collections import OrderedDict

from indy_common.types import ClientGetRevocRegDeltaField
from plenum.common.messages.fields import NonEmptyStringField, ConstantField, IntegerField
from indy_common.constants import TYPE, REVOC_REG_DEF_ID, FROM, TO, GET_REVOC_REG_DELTA

# Reference wire schema for a GET_REVOC_REG_DELTA client request.
EXPECTED_GET_REVOC_REG_DELTA_FIELD = OrderedDict([
    (TYPE, ConstantField(GET_REVOC_REG_DELTA)),
    (REVOC_REG_DEF_ID, NonEmptyStringField),
    (FROM, IntegerField),
    (TO, IntegerField),
])


def test_get_revoc_reg_def_schema():
    """The declared schema must expose exactly the expected field names."""
    expected_keys = EXPECTED_GET_REVOC_REG_DELTA_FIELD.keys()
    schema_keys = OrderedDict(ClientGetRevocRegDeltaField.schema).keys()
    assert schema_keys == expected_keys
{ "content_hash": "cd7fa24d110689ae9b2591bfde2c077c", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 90, "avg_line_length": 39.05882352941177, "alnum_prop": 0.7695783132530121, "repo_name": "spivachuk/sovrin-node", "id": "34b8996af563d09c669c68744dfcdb3ed296d930", "size": "664", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "indy_common/test/types/test_get_revoc_reg_delta_schema.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "3329" }, { "name": "Dockerfile", "bytes": "7269" }, { "name": "Groovy", "bytes": "8984" }, { "name": "Makefile", "bytes": "11151" }, { "name": "Python", "bytes": "1681637" }, { "name": "Ruby", "bytes": "65393" }, { "name": "Rust", "bytes": "25532" }, { "name": "Shell", "bytes": "132633" } ], "symlink_target": "" }
""" Split a hex file signed by imagetool into its binary/image and its header. This is needed to be able to pack these two parts into the sample separately, saving flash space. """ import argparse from intelhex import IntelHex def dump_header(infile, image, header): inhex = IntelHex(infile) (start, end) = inhex.segments()[0] inhex.tobinfile(image, start=start, end=end-1) (start, end) = inhex.segments()[-1] inhex.tobinfile(header, start=start, end=end-1) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('input') parser.add_argument('image') parser.add_argument('header') args = parser.parse_args() dump_header(args.input, args.image, args.header)
{ "content_hash": "b0cca28f707e0295167442ac74d9b009", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 62, "avg_line_length": 31.782608695652176, "alnum_prop": 0.6922024623803009, "repo_name": "galak/zephyr", "id": "8bf9841d82d132093cc16e9b122b0d7c2e934162", "size": "825", "binary": false, "copies": "4", "ref": "refs/heads/main", "path": "samples/tfm_integration/psa_firmware/split-header.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "445128" }, { "name": "Batchfile", "bytes": "110" }, { "name": "C", "bytes": "45081460" }, { "name": "C++", "bytes": "29379" }, { "name": "CMake", "bytes": "1396197" }, { "name": "Cadence", "bytes": "1501" }, { "name": "EmberScript", "bytes": "997" }, { "name": "Forth", "bytes": "1648" }, { "name": "GDB", "bytes": "1285" }, { "name": "Haskell", "bytes": "722" }, { "name": "JetBrains MPS", "bytes": "3152" }, { "name": "PLSQL", "bytes": "281" }, { "name": "Perl", "bytes": "215338" }, { "name": "Python", "bytes": "2267025" }, { "name": "Shell", "bytes": "173704" }, { "name": "SmPL", "bytes": "36840" }, { "name": "Smalltalk", "bytes": "1885" }, { "name": "SourcePawn", "bytes": "14890" }, { "name": "Tcl", "bytes": "7034" }, { "name": "VBA", "bytes": "294" }, { "name": "Verilog", "bytes": "6394" } ], "symlink_target": "" }
import pkg_resources
# Fix: sys was used in the ImportError handler below but never imported,
# so the fallback path itself raised a NameError instead of exiting cleanly.
import sys

from .elements.const import *
import os

LIB_PATH = pkg_resources.resource_filename('antlia', 'lib/')
os.environ["PYSDL2_DLL_PATH"] = LIB_PATH

try:
    import sdl2
except ImportError:
    import traceback
    traceback.print_exc()
    sys.exit(1)


def changeCursor(t):
    """Switch the active mouse cursor to the SDL system cursor for *t*.

    t -- a cursor-type constant from elements.const (WAIT, TEXT, ...);
         any other value falls back to the standard arrow cursor.
    """
    # NOTE(review): cursors created here are never released with
    # SDL_FreeCursor, so each call leaks one cursor -- confirm whether
    # calls are rare enough for this to be acceptable.
    if t == WAIT:
        cursor = sdl2.SDL_CreateSystemCursor(sdl2.SDL_SYSTEM_CURSOR_WAIT)
    elif t == TEXT:
        cursor = sdl2.SDL_CreateSystemCursor(sdl2.SDL_SYSTEM_CURSOR_IBEAM)
    else:
        cursor = sdl2.SDL_CreateSystemCursor(sdl2.SDL_SYSTEM_CURSOR_ARROW)
    sdl2.SDL_SetCursor(cursor)
{ "content_hash": "95a6783e499e87e2e9c81955f7acd166", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 68, "avg_line_length": 25.09090909090909, "alnum_prop": 0.75, "repo_name": "Romaingin/Antlia", "id": "24f208b8d154b4f938db3b8f530b4889269e87c6", "size": "552", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "antlia/cursor.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "100508" } ], "symlink_target": "" }
import os import codecs import json import pprint import sqlite3 import platform fold_base = 'data_row' #fold_base = 'test_data_row' jobs_target = 'topsy_fortuner_twibes' client_id = platform.node() print client_id sql_init = ''' CREATE TABLE IF NOT EXISTS tweets ( tweet_id TEXT NOT NULL UNIQUE, tweet_url TEXT NOT NULL, tw_username TEXT NOT NULL, tweet_firstpost_date TEXT NOT NULL, tweet_title TEXT NOT NULL, tweet_content TEXT NOT NULL, tweet_topsy_full TEXT NOT NULL ); ''' def db_get(): db = sqlite3.connect('../dbs/%s==%s.db'%(jobs_target, client_id)) return db def db_init(db, sql_init): c = db.cursor() c.executescript(sql_init) db.commit() c.execute('SELECT * FROM SQLITE_MASTER') tables = c.fetchall() print 'db tables: ', tables print '=============================' c.close() db = db_get() db_init(db, sql_init) def parse_file(p, tw_username): f = codecs.open(p, 'r', encoding='utf-8') j = json.loads(f.read()) if not j.has_key('response'): return if not j['response'].has_key('list'): return c = db.cursor() for r in j['response']['list']: firstpost_date = r['firstpost_date'] title = r['title'] content = r['content'] url = r['url'] ls = url.split('/') tweet_id = ls[len(ls)-1] sql = 'INSERT OR IGNORE INTO tweets (tweet_id, tweet_url, tw_username, tweet_firstpost_date, tweet_title, tweet_content, tweet_topsy_full) VALUES (?,?,?,?,?,?,?)' c.execute(sql, (tweet_id, url, tw_username, firstpost_date, title, content, json.dumps(r))) db.commit() c.close() def loop_dir(p): dir_lists = os.listdir(p) t = len(dir_lists) k = 0 for dir_list in dir_lists: file_full_path = os.path.join(p, dir_list) t = 0 if os.path.isdir(file_full_path): print '****** is dir', file_full_path fileName, fileExtension = os.path.splitext(dir_list) #print fileName, fileExtension if fileExtension != '.html': continue ls = fileName.strip().split('=') page_number = ls[0] twibes_rank = ls[1] tw_username = ls[2] print page_number, tw_username parse_file(file_full_path, tw_username) if __name__ == '__main__': 
p = '../../%s/web_jobs/%s/'%(fold_base, jobs_target) loop_dir(p)
{ "content_hash": "497d6890e035bb1b5667b7819f2145e5", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 164, "avg_line_length": 24.75862068965517, "alnum_prop": 0.6471680594243269, "repo_name": "jianhuashao/WebDownloadJobsManage", "id": "ba80de79ddc649e1960265a0452a3cbf216256ab", "size": "2154", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "data_row_process/topsy_fortuner_twibes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "109468" }, { "name": "Python", "bytes": "722505" } ], "symlink_target": "" }
from django.contrib import admin

from tinycontent.models import TinyContent, TinyContentFileUpload


class TinyContentAdmin(admin.ModelAdmin):
    """Admin options for TinyContent: list by name, search name/content."""

    list_display = ('name', )
    search_fields = ('name', 'content', )


class TinyContentFileUploadAdmin(admin.ModelAdmin):
    """Admin options for uploaded files: list name/slug, search by name."""

    list_display = ('name', 'slug', )
    search_fields = ('name', )


# Register both model admins with the default admin site.
admin.site.register(TinyContent, TinyContentAdmin)
admin.site.register(TinyContentFileUpload, TinyContentFileUploadAdmin)
{ "content_hash": "686e1b148ef166a6fbcf3ab3a02a3f07", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 70, "avg_line_length": 25.77777777777778, "alnum_prop": 0.7543103448275862, "repo_name": "dominicrodger/django-tinycontent", "id": "bfd9beb93e354f3dc99d409fae2cea958f2ec342", "size": "464", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tinycontent/admin.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "588" }, { "name": "Makefile", "bytes": "976" }, { "name": "Python", "bytes": "29459" } ], "symlink_target": "" }
import pytest

from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import TemplateRuntimeError
from jinja2 import TemplateSyntaxError

# The templates below form an inheritance chain layout <- level1 <- ... <-
# level4; the leading "|" characters make block boundaries visible in the
# rendered output asserted by the tests.
LAYOUTTEMPLATE = """\
|{% block block1 %}block 1 from layout{% endblock %}
|{% block block2 %}block 2 from layout{% endblock %}
|{% block block3 %}
{% block block4 %}nested block 4 from layout{% endblock %}
{% endblock %}|"""

LEVEL1TEMPLATE = """\
{% extends "layout" %}
{% block block1 %}block 1 from level1{% endblock %}"""

LEVEL2TEMPLATE = """\
{% extends "level1" %}
{% block block2 %}{% block block5 %}nested block 5 from level2{% endblock %}{% endblock %}"""

LEVEL3TEMPLATE = """\
{% extends "level2" %}
{% block block5 %}block 5 from level3{% endblock %}
{% block block4 %}block 4 from level3{% endblock %}
"""

LEVEL4TEMPLATE = """\
{% extends "level3" %}
{% block block3 %}block 3 from level4{% endblock %}
"""

WORKINGTEMPLATE = """\
{% extends "layout" %}
{% block block1 %}
  {% if false %}
    {% block block2 %}
      this should work
    {% endblock %}
  {% endif %}
{% endblock %}
"""

DOUBLEEXTENDS = """\
{% extends "layout" %}
{% extends "layout" %}
{% block block1 %}
  {% if false %}
    {% block block2 %}
      this should work
    {% endblock %}
  {% endif %}
{% endblock %}
"""


@pytest.fixture
def env():
    # Shared environment with the inheritance chain preloaded;
    # trim_blocks keeps the rendered output on one line.
    return Environment(
        loader=DictLoader(
            {
                "layout": LAYOUTTEMPLATE,
                "level1": LEVEL1TEMPLATE,
                "level2": LEVEL2TEMPLATE,
                "level3": LEVEL3TEMPLATE,
                "level4": LEVEL4TEMPLATE,
                "working": WORKINGTEMPLATE,
                "doublee": DOUBLEEXTENDS,
            }
        ),
        trim_blocks=True,
    )


class TestInheritance:
    """Tests for {% extends %} / {% block %} semantics."""

    def test_layout(self, env):
        tmpl = env.get_template("layout")
        assert tmpl.render() == (
            "|block 1 from layout|block 2 from layout|nested block 4 from layout|"
        )

    def test_level1(self, env):
        tmpl = env.get_template("level1")
        assert tmpl.render() == (
            "|block 1 from level1|block 2 from layout|nested block 4 from layout|"
        )

    def test_level2(self, env):
        tmpl = env.get_template("level2")
        assert tmpl.render() == (
            "|block 1 from level1|nested block 5 from "
            "level2|nested block 4 from layout|"
        )

    def test_level3(self, env):
        tmpl = env.get_template("level3")
        assert tmpl.render() == (
            "|block 1 from level1|block 5 from level3|block 4 from level3|"
        )

    def test_level4(self, env):
        tmpl = env.get_template("level4")
        assert tmpl.render() == (
            "|block 1 from level1|block 5 from level3|block 3 from level4|"
        )

    def test_super(self, env):
        # super() renders the parent template's version of the block.
        env = Environment(
            loader=DictLoader(
                {
                    "a": "{% block intro %}INTRO{% endblock %}|"
                    "BEFORE|{% block data %}INNER{% endblock %}|AFTER",
                    "b": '{% extends "a" %}{% block data %}({{ '
                    "super() }}){% endblock %}",
                    "c": '{% extends "b" %}{% block intro %}--{{ '
                    "super() }}--{% endblock %}\n{% block data "
                    "%}[{{ super() }}]{% endblock %}",
                }
            )
        )
        tmpl = env.get_template("c")
        assert tmpl.render() == "--INTRO--|BEFORE|[(INNER)]|AFTER"

    def test_working(self, env):
        # Block nested inside a dead branch must still compile.
        env.get_template("working")

    def test_reuse_blocks(self, env):
        tmpl = env.from_string(
            "{{ self.foo() }}|{% block foo %}42{% endblock %}|{{ self.foo() }}"
        )
        assert tmpl.render() == "42|42|42"

    def test_preserve_blocks(self, env):
        # A block inside a false branch is still callable via self.x().
        env = Environment(
            loader=DictLoader(
                {
                    "a": "{% if false %}{% block x %}A{% endblock %}"
                    "{% endif %}{{ self.x() }}",
                    "b": '{% extends "a" %}{% block x %}B{{ super() }}{% endblock %}',
                }
            )
        )
        tmpl = env.get_template("b")
        assert tmpl.render() == "BA"

    def test_dynamic_inheritance(self, env):
        # The extends target may be a runtime variable.
        env = Environment(
            loader=DictLoader(
                {
                    "default1": "DEFAULT1{% block x %}{% endblock %}",
                    "default2": "DEFAULT2{% block x %}{% endblock %}",
                    "child": "{% extends default %}{% block x %}CHILD{% endblock %}",
                }
            )
        )
        tmpl = env.get_template("child")
        for m in range(1, 3):
            assert tmpl.render(default=f"default{m}") == f"DEFAULT{m}CHILD"

    def test_multi_inheritance(self, env):
        # extends may be chosen by a condition evaluated at render time.
        env = Environment(
            loader=DictLoader(
                {
                    "default1": "DEFAULT1{% block x %}{% endblock %}",
                    "default2": "DEFAULT2{% block x %}{% endblock %}",
                    "child": (
                        "{% if default %}{% extends default %}{% else %}"
                        "{% extends 'default1' %}{% endif %}"
                        "{% block x %}CHILD{% endblock %}"
                    ),
                }
            )
        )
        tmpl = env.get_template("child")
        assert tmpl.render(default="default2") == "DEFAULT2CHILD"
        assert tmpl.render(default="default1") == "DEFAULT1CHILD"
        assert tmpl.render() == "DEFAULT1CHILD"

    def test_scoped_block(self, env):
        # A scoped block sees the loop variable of the enclosing for loop.
        env = Environment(
            loader=DictLoader(
                {
                    "default.html": "{% for item in seq %}[{% block item scoped %}"
                    "{% endblock %}]{% endfor %}"
                }
            )
        )
        t = env.from_string(
            "{% extends 'default.html' %}{% block item %}{{ item }}{% endblock %}"
        )
        assert t.render(seq=list(range(5))) == "[0][1][2][3][4]"

    def test_super_in_scoped_block(self, env):
        env = Environment(
            loader=DictLoader(
                {
                    "default.html": "{% for item in seq %}[{% block item scoped %}"
                    "{{ item }}{% endblock %}]{% endfor %}"
                }
            )
        )
        t = env.from_string(
            '{% extends "default.html" %}{% block item %}'
            "{{ super() }}|{{ item * 2 }}{% endblock %}"
        )
        assert t.render(seq=list(range(5))) == "[0|0][1|2][2|4][3|6][4|8]"

    def test_scoped_block_after_inheritance(self, env):
        env = Environment(
            loader=DictLoader(
                {
                    "layout.html": """
            {% block useless %}{% endblock %}
            """,
                    "index.html": """
            {%- extends 'layout.html' %}
            {% from 'helpers.html' import foo with context %}
            {% block useless %}
                {% for x in [1, 2, 3] %}
                    {% block testing scoped %}
                        {{ foo(x) }}
                    {% endblock %}
                {% endfor %}
            {% endblock %}
            """,
                    "helpers.html": """
            {% macro foo(x) %}{{ the_foo + x }}{% endmacro %}
            """,
                }
            )
        )
        rv = env.get_template("index.html").render(the_foo=42).split()
        assert rv == ["43", "44", "45"]

    def test_level1_required(self, env):
        # A required block containing only comments/whitespace is valid.
        env = Environment(
            loader=DictLoader(
                {
                    "default": "{% block x required %}{# comment #}\n {% endblock %}",
                    "level1": "{% extends 'default' %}{% block x %}[1]{% endblock %}",
                }
            )
        )
        rv = env.get_template("level1").render()
        assert rv == "[1]"

    def test_level2_required(self, env):
        env = Environment(
            loader=DictLoader(
                {
                    "default": "{% block x required %}{% endblock %}",
                    "level1": "{% extends 'default' %}{% block x %}[1]{% endblock %}",
                    "level2": "{% extends 'default' %}{% block x %}[2]{% endblock %}",
                }
            )
        )
        rv1 = env.get_template("level1").render()
        rv2 = env.get_template("level2").render()

        assert rv1 == "[1]"
        assert rv2 == "[2]"

    def test_level3_required(self, env):
        # Rendering a chain that never overrides a required block fails.
        env = Environment(
            loader=DictLoader(
                {
                    "default": "{% block x required %}{% endblock %}",
                    "level1": "{% extends 'default' %}",
                    "level2": "{% extends 'level1' %}{% block x %}[2]{% endblock %}",
                    "level3": "{% extends 'level2' %}",
                }
            )
        )
        t1 = env.get_template("level1")
        t2 = env.get_template("level2")
        t3 = env.get_template("level3")

        with pytest.raises(TemplateRuntimeError, match="Required block 'x' not found"):
            assert t1.render()

        assert t2.render() == "[2]"
        assert t3.render() == "[2]"

    def test_invalid_required(self, env):
        env = Environment(
            loader=DictLoader(
                {
                    "default": "{% block x required %}data {# #}{% endblock %}",
                    "default1": "{% block x required %}{% block y %}"
                    "{% endblock %} {% endblock %}",
                    "default2": "{% block x required %}{% if true %}"
                    "{% endif %} {% endblock %}",
                    "level1": "{% if default %}{% extends default %}"
                    "{% else %}{% extends 'default' %}{% endif %}"
                    "{%- block x %}CHILD{% endblock %}",
                }
            )
        )
        t = env.get_template("level1")

        with pytest.raises(
            TemplateSyntaxError,
            match="Required blocks can only contain comments or whitespace",
        ):
            assert t.render(default="default")
            assert t.render(default="default2")
            assert t.render(default="default3")

    def test_required_with_scope(self, env):
        env = Environment(
            loader=DictLoader(
                {
                    "default1": "{% for item in seq %}[{% block item scoped required %}"
                    "{% endblock %}]{% endfor %}",
                    "child1": "{% extends 'default1' %}{% block item %}"
                    "{{ item }}{% endblock %}",
                    "default2": "{% for item in seq %}[{% block item required scoped %}"
                    "{% endblock %}]{% endfor %}",
                    "child2": "{% extends 'default2' %}{% block item %}"
                    "{{ item }}{% endblock %}",
                }
            )
        )
        t1 = env.get_template("child1")
        t2 = env.get_template("child2")

        assert t1.render(seq=list(range(3))) == "[0][1][2]"

        # scoped must come before required
        with pytest.raises(TemplateSyntaxError):
            t2.render(seq=list(range(3)))

    def test_duplicate_required_or_scoped(self, env):
        env = Environment(
            loader=DictLoader(
                {
                    "default1": "{% for item in seq %}[{% block item "
                    "scoped scoped %}}{{% endblock %}}]{{% endfor %}}",
                    "default2": "{% for item in seq %}[{% block item "
                    "required required %}}{{% endblock %}}]{{% endfor %}}",
                    "child": "{% if default %}{% extends default %}{% else %}"
                    "{% extends 'default1' %}{% endif %}{%- block x %}"
                    "CHILD{% endblock %}",
                }
            )
        )
        tmpl = env.get_template("child")
        with pytest.raises(TemplateSyntaxError):
            tmpl.render(default="default1", seq=list(range(3)))
            tmpl.render(default="default2", seq=list(range(3)))


class TestBugFix:
    """Regression tests for previously fixed inheritance bugs."""

    def test_fixed_macro_scoping_bug(self, env):
        # A macro defined in a child must shadow the parent's macro of the
        # same name when blocks from the child are rendered.
        assert (
            Environment(
                loader=DictLoader(
                    {
                        "test.html": """\
        {% extends 'details.html' %}

        {% macro my_macro() %}
        my_macro
        {% endmacro %}

        {% block inner_box %}
        {{ my_macro() }}
        {% endblock %}
        """,
                        "details.html": """\
        {% extends 'standard.html' %}

        {% macro my_macro() %}
        my_macro
        {% endmacro %}

        {% block content %}
        {% block outer_box %}
        outer_box
        {% block inner_box %}
        inner_box
        {% endblock %}
        {% endblock %}
        {% endblock %}
        """,
                        "standard.html": """
        {% block content %}&nbsp;{% endblock %}
        """,
                    }
                )
            )
            .get_template("test.html")
            .render()
            .split()
            == ["outer_box", "my_macro"]
        )

    def test_double_extends(self, env):
        """Ensures that a template with more than 1 {% extends ... %} usage
        raises a ``TemplateError``.
        """
        with pytest.raises(TemplateRuntimeError, match="extended multiple times"):
            env.get_template("doublee").render()
{ "content_hash": "3cc06bea2071f2137910c65721c4b336", "timestamp": "", "source": "github", "line_count": 405, "max_line_length": 88, "avg_line_length": 33.03950617283951, "alnum_prop": 0.4450340034377102, "repo_name": "mitsuhiko/jinja2", "id": "0c20d4da7d3c29e36bf8c7fb36c747b069b6f6e5", "size": "13381", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "tests/test_inheritance.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Emacs Lisp", "bytes": "4733" }, { "name": "HTML", "bytes": "1729" }, { "name": "Makefile", "bytes": "584" }, { "name": "Python", "bytes": "956171" }, { "name": "VimL", "bytes": "5637" } ], "symlink_target": "" }
from core import *

class InstancePool(object):
    """
    Container for instances of Instance and Entity, that provides some utility
    methods to search through them.

    Every pool implicitly contains the two top-most classes of the
    instantiation chain (Entity and Instance); they are added at
    construction time and never need to be loaded explicitly.
    """
    def __init__(self):
        self._items = []
        # Handle special case of the top-most classes of the instantiation chain (Entity and Instance).
        # They are not loaded explicitly, and are always available from any pool.
        self.add(Entity)
        self.add(Instance)

    def add(self, instance):
        """Add an item to the pool.

        Raises Exception when an item with the same (non-None) id is
        already pooled: silently replacing it could have implications when
        working with multiple versions of a same instance.
        """
        id = instance.get_id()
        if id is not None and self.get_item(id) is not None:
            # raising an exception is an option. an alternative would be to silently replace the instance with the one
            # being loaded, but there may be implications when working with multiple versions of a same instance
            raise Exception("Instance with id '%s' already exists in the pool" % (id,))
        self._items.append(instance)

    def remove(self, item):
        """Remove an item from the pool (ValueError if it is not pooled)."""
        self._items.remove(item)

    def get_item(self, id):
        """Return the pooled item with the given id, or None if absent."""
        assert id is not None
        for item in self._items:
            if item.get_id() == id:
                return item
        return None

    def get_items(self, levels=(0, 1, 2), base_name=None):
        """Return pool items, optionally filtered.

        levels: which meta-levels to keep -- 0 selects Instance objects,
            1 selects Entity objects, 2 selects the Entity/Instance
            classes themselves. The default keeps everything.
        base_name: if given, keep only Entity items whose first base class
            is the pooled item identified by base_name.
        """
        result = self._items
        if levels != (0, 1, 2):
            result = [item for item in result if
                      (isinstance(item, Instance) and 0 in levels) or
                      (isinstance(item, Entity) and 1 in levels) or
                      (item in (Entity, Instance) and 2 in levels)]
        if base_name is not None:
            base = self.get_item(base_name)
            result = [item for item in result
                      if isinstance(item, Entity)
                      and len(item.__bases__) > 0
                      and item.__bases__[0] is base]
        return result

    def get_instances_of(self, spec_name, direct_instances_only=False):
        """Return the pooled instances of the named spec.

        Unless direct_instances_only is set, instances of any descendant
        spec are included as well.
        """
        assert spec_name is not None
        if direct_instances_only:
            return [item for item in self._items
                    if hasattr(item.__class__, 'name')
                    and item.__class__.name == spec_name]
        spec_and_childs = self.get_spec_and_child_specs(spec_name)
        return [item for item in self._items if item.__class__ in spec_and_childs]

    def get_spec_and_child_specs(self, spec_name):
        """Return the named spec followed by all of its descendant specs,
        gathered breadth-first through the pooled inheritance chain."""
        inh_chain = [self.get_item(spec_name)]
        current = list(inh_chain)
        while True:
            childs = [self.get_items(base_name=spec.get_name()) for spec in current]
            current = [child for sublist in childs for child in sublist]
            if not current:
                break
            inh_chain.extend(current)
        return inh_chain

    def get_possible_domains(self):
        """Return an identifier -> name mapping for every pooled Entity,
        plus the builtin 'string' domain."""
        possible_domains = {'string': 'string'}
        possible_domains.update(
            dict([(i.get_identifier(), i.get_name())
                  for i in self.get_items(levels=(1,))]))
        return possible_domains
{ "content_hash": "4913e4e1b2105d3dd23832f6c1514e1f", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 132, "avg_line_length": 41.91304347826087, "alnum_prop": 0.6023513139695712, "repo_name": "filipefigcorreia/TracAdaptiveSoftwareArtifacts", "id": "f7abf30589ea4193a84ae912031b514e3f00a1ac", "size": "3048", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "AdaptiveArtifacts/model/pool.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "46346" }, { "name": "JavaScript", "bytes": "6566417" }, { "name": "PHP", "bytes": "32097" }, { "name": "Python", "bytes": "142917" } ], "symlink_target": "" }
import logging

from aquilon.aqdb.model import (Cluster, EsxCluster, ComputeCluster,
                                StorageCluster)
from aquilon.worker.templates.base import Plenary, PlenaryCollection
from aquilon.worker.templates.panutils import (StructureTemplate, PanValue,
                                               pan_assign, pan_include,
                                               pan_append)
from aquilon.worker.locks import CompileKey

LOGGER = logging.getLogger(__name__)


class PlenaryCluster(PlenaryCollection):
    """
    A facade for the variety of PlenaryCluster subsidiary files
    """
    def __init__(self, dbcluster, logger=LOGGER):
        # One collection entry per subsidiary plenary: the object profile,
        # its structure data, and the client include used by member hosts.
        PlenaryCollection.__init__(self, logger=logger)
        self.dbobj = dbcluster
        self.plenaries.append(PlenaryClusterObject(dbcluster, logger=logger))
        self.plenaries.append(PlenaryClusterData(dbcluster, logger=logger))
        self.plenaries.append(PlenaryClusterClient(dbcluster, logger=logger))


# All cluster flavours are rendered through the same facade.
Plenary.handlers[Cluster] = PlenaryCluster
Plenary.handlers[ComputeCluster] = PlenaryCluster
Plenary.handlers[EsxCluster] = PlenaryCluster
Plenary.handlers[StorageCluster] = PlenaryCluster


class PlenaryClusterData(Plenary):
    """Structure template carrying the cluster's data (location, thresholds,
    members, resources); included by the cluster object template."""

    template_type = "structure"

    def __init__(self, dbcluster, logger=LOGGER):
        Plenary.__init__(self, dbcluster, logger=logger)
        self.name = dbcluster.name
        if dbcluster.metacluster:
            self.metacluster = dbcluster.metacluster.name
        else:
            self.metacluster = None
        self.plenary_core = "clusterdata"
        self.plenary_template = dbcluster.name

    def get_key(self):
        """Compile lock key: scoped to this profile within the branch."""
        return CompileKey(domain=self.dbobj.branch.name,
                          profile=self.plenary_template_name,
                          logger=self.logger)

    def body(self, lines):
        """Emit the pan assignments describing this cluster's data."""
        pan_assign(lines, "system/cluster/name", self.name)
        pan_assign(lines, "system/cluster/type", self.dbobj.cluster_type)
        # Location hierarchy: only levels actually set on the constraint
        # are emitted.
        dbloc = self.dbobj.location_constraint
        pan_assign(lines, "system/cluster/sysloc/location", dbloc.sysloc())
        if dbloc.continent:
            pan_assign(lines, "system/cluster/sysloc/continent",
                       dbloc.continent.name)
        if dbloc.city:
            pan_assign(lines, "system/cluster/sysloc/city", dbloc.city.name)
        if dbloc.campus:
            pan_assign(lines, "system/cluster/sysloc/campus",
                       dbloc.campus.name)
            ## maintaining this so templates don't break
            ## during transition period.. should be DEPRECATED
            pan_assign(lines, "system/cluster/campus", dbloc.campus.name)
        if dbloc.building:
            pan_assign(lines, "system/cluster/sysloc/building",
                       dbloc.building.name)
        if dbloc.bunker:
            pan_assign(lines, "system/cluster/sysloc/bunker", dbloc.bunker.name)
        if dbloc.rack:
            pan_assign(lines, "system/cluster/rack/row", dbloc.rack.rack_row)
            pan_assign(lines, "system/cluster/rack/column",
                       dbloc.rack.rack_column)
            pan_assign(lines, "system/cluster/rack/name", dbloc.rack.name)
        if dbloc.room:
            # NOTE(review): this assigns the Room object itself, unlike the
            # .name attribute used for every other location level -- confirm
            # whether dbloc.room.name was intended.
            pan_assign(lines, "system/cluster/rack/room", dbloc.room)
        pan_assign(lines, "system/cluster/down_hosts_threshold",
                   self.dbobj.dht_value)
        if self.dbobj.dmt_value is not None:
            pan_assign(lines, "system/cluster/down_maint_threshold",
                       self.dbobj.dmt_value)
        # When percent mode is enabled, the raw threshold is published under
        # .../down_hosts_percent and the boolean flag under
        # .../down_hosts_as_percent (same pattern for maintenance below).
        if self.dbobj.down_hosts_percent:
            pan_assign(lines, "system/cluster/down_hosts_percent",
                       self.dbobj.down_hosts_threshold)
            pan_assign(lines, "system/cluster/down_hosts_as_percent",
                       self.dbobj.down_hosts_percent)
        if self.dbobj.down_maint_percent:
            pan_assign(lines, "system/cluster/down_maint_percent",
                       self.dbobj.down_maint_threshold)
            pan_assign(lines, "system/cluster/down_maint_as_percent",
                       self.dbobj.down_maint_percent)
        lines.append("")
        # Only use system names here to avoid circular dependencies.
        # Other templates that needs to look up the underlying values use:
        # foreach(idx; host; value("system/cluster/members")) {
        #     v = value("/" + host + "/system/foo/bar/baz");
        # );
        pan_assign(lines, "system/cluster/members",
                   sorted([member.fqdn for member in self.dbobj.hosts]))

        lines.append("")
        if self.dbobj.resholder:
            for resource in sorted(self.dbobj.resholder.resources):
                pan_append(lines, "system/resources/" + resource.resource_type,
                           StructureTemplate(resource.template_base +
                                             '/config'))
        pan_assign(lines, "system/build", self.dbobj.status.name)
        if self.dbobj.allowed_personalities:
            pan_assign(lines, "system/cluster/allowed_personalities",
                       sorted(["%s/%s" % (p.archetype.name, p.name)
                               for p in self.dbobj.allowed_personalities]))
        # Dispatch to a cluster-type specific section (e.g. body_esx) if one
        # is defined on this class.
        fname = "body_%s" % self.dbobj.cluster_type
        if hasattr(self, fname):
            getattr(self, fname)(lines)

    def body_esx(self, lines):
        """Extra assignments emitted only for ESX clusters."""
        if self.metacluster:
            pan_assign(lines, "system/metacluster/name", self.metacluster)
        pan_assign(lines, "system/cluster/ratio", [self.dbobj.vm_count,
                                                   self.dbobj.host_count])
        pan_assign(lines, "system/cluster/max_hosts", self.dbobj.max_hosts)
        lines.append("")
        lines.append("")
        if isinstance(self.dbobj, EsxCluster) and self.dbobj.switch:
            pan_assign(lines, "system/cluster/switch",
                       self.dbobj.switch.primary_name)


class PlenaryClusterObject(Plenary):
    """
    A cluster has its own output profile, so the plenary cluster template
    is an object template that includes the data about which machines
    are contained inside the cluster (via an include of the clusterdata
    plenary)
    """

    template_type = "object"

    def __init__(self, dbcluster, logger=LOGGER):
        Plenary.__init__(self, dbcluster, logger=logger)
        self.name = dbcluster.name
        self.loadpath = dbcluster.personality.archetype.name
        self.plenary_core = "clusters"
        self.plenary_template = dbcluster.name

    def get_key(self):
        """Compile lock key: scoped to this profile within the branch."""
        return CompileKey(domain=self.dbobj.branch.name,
                          profile=self.plenary_template_name,
                          logger=self.logger)

    def body(self, lines):
        """Emit the object template: clusterdata include, service bindings,
        personality, wrapped between archetype/base and archetype/final."""
        pan_include(lines, ["pan/units", "pan/functions"])
        pan_assign(lines, "/",
                   StructureTemplate("clusterdata/%s" % self.name,
                                     {"metadata": PanValue("/metadata")}))
        pan_include(lines, "archetype/base")

        for servinst in sorted(self.dbobj.service_bindings):
            pan_include(lines, "service/%s/%s/client/config" %
                        (servinst.service.name, servinst.name))

        pan_include(lines, "personality/%s/config" %
                    self.dbobj.personality.name)
        pan_include(lines, "archetype/final")


class PlenaryClusterClient(Plenary):
    """
    A host that is a member of a cluster will include the cluster client
    plenary template. This just names the cluster and nothing more.
    """

    template_type = ""

    def __init__(self, dbcluster, logger=LOGGER):
        Plenary.__init__(self, dbcluster, logger=logger)
        self.name = dbcluster.name
        self.plenary_core = "cluster/%s" % self.name
        self.plenary_template = "client"

    def get_key(self):
        # This takes a domain lock because it could affect all clients...
        return CompileKey(domain=self.dbobj.branch.name, logger=self.logger)

    def body(self, lines):
        """Emit the per-member include: cluster name, duplicated resource
        list, and the optional per-personality feature include."""
        pan_assign(lines, "/system/cluster/name", self.name)
        # We could just use a PAN external reference to pull in this value
        # from the cluster template, but since we know that these templates
        # are always in sync, we can duplicate the content here to avoid the
        # possibility of circular external references.
        if self.dbobj.resholder:
            for resource in sorted(self.dbobj.resholder.resources):
                pan_append(lines, "/system/cluster/resources/" +
                           resource.resource_type,
                           StructureTemplate(resource.template_base +
                                             '/config'))
        lines.append("include { if_exists('features/' + value('/system/archetype/name') + '/%s/%s/config') };"
                     % (self.dbobj.personality.archetype.name,
                        self.dbobj.personality.name))
{ "content_hash": "d8668a8cf273ca86dd85165ba033c636", "timestamp": "", "source": "github", "line_count": 203, "max_line_length": 110, "avg_line_length": 43.610837438423644, "alnum_prop": 0.6084942957189653, "repo_name": "jrha/aquilon", "id": "7c10a5ce9b747dad24fc9d60679797fb3bb6cec1", "size": "9558", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "lib/python2.6/aquilon/worker/templates/cluster.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
"""Package initializer: re-exports the public API (Administration plus
everything declared by the _parameters module) and pins the version."""
from __future__ import absolute_import
from .administration import Administration
from ._parameters import *
# Package release version.
__version__ = "3.5.9"
{ "content_hash": "5797f333b912b0415e024b2e7899942e", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 42, "avg_line_length": 26.2, "alnum_prop": 0.7480916030534351, "repo_name": "adegwerth/ArcREST", "id": "40d9e1797d7f3a577a13d8e9a47420a0f243f6c2", "size": "131", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/arcrest/manageorg/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "48383" }, { "name": "Python", "bytes": "2272618" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial migration for this app: creates the TelesomPayment model as
    a multi-table-inheritance child of payments.Payment (note the
    payment_ptr parent link and the 'payments.payment' base)."""

    initial = True

    # Must run after the payments app migration that provides the Payment
    # model referenced below.
    dependencies = [
        ('payments', '0003_auto_20161025_1221'),
    ]

    operations = [
        migrations.CreateModel(
            name='TelesomPayment',
            fields=[
                # Parent link implementing multi-table inheritance from Payment.
                ('payment_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='payments.Payment')),
                ('amount', models.CharField(blank=True, help_text=b'Amount', max_length=200, null=True)),
                ('currency', models.CharField(blank=True, default=b'USD', help_text=b'Transaction currency', max_length=200, null=True)),
                ('mobile', models.CharField(blank=True, help_text=b'Mobile Number', max_length=200, null=True)),
                ('transaction_reference', models.CharField(blank=True, help_text=b'Transaction reference for tracking transaction', max_length=100, null=True)),
                ('description', models.CharField(blank=True, help_text=b'Description', max_length=200, null=True)),
                # Raw provider responses kept for auditing/debugging.
                ('response', models.TextField(blank=True, help_text='Response from Telesom', null=True)),
                ('update_response', models.TextField(blank=True, help_text='Result from Telesom (status update)', null=True)),
            ],
            options={
                'ordering': ('-created', '-updated'),
                'verbose_name': 'Telesom/Zaad Payment',
                'verbose_name_plural': 'Telesom/Zaad Payments',
            },
            bases=('payments.payment',),
        ),
    ]
{ "content_hash": "e80b334116ca0966fe7697bdda156762", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 194, "avg_line_length": 48.77142857142857, "alnum_prop": 0.616871704745167, "repo_name": "onepercentclub/bluebottle", "id": "bee59c8fb147c2846d7e40d4e3754167b1bf0c33", "size": "1780", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bluebottle/payments_telesom/migrations/0001_initial.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "41694" }, { "name": "HTML", "bytes": "246695" }, { "name": "Handlebars", "bytes": "63" }, { "name": "JavaScript", "bytes": "139123" }, { "name": "PHP", "bytes": "35" }, { "name": "PLpgSQL", "bytes": "1369882" }, { "name": "PostScript", "bytes": "2927" }, { "name": "Python", "bytes": "4983116" }, { "name": "Rich Text Format", "bytes": "39109" }, { "name": "SCSS", "bytes": "99555" }, { "name": "Shell", "bytes": "3068" }, { "name": "Smarty", "bytes": "3814" } ], "symlink_target": "" }
# Viewer script: connects to the robot's camera stream over TCP, shows each
# frame in an OpenCV window, and optionally records the stream to an AVI
# file (when savevideo is set). Press 'q' in the window to quit.
import cv2
import time
import datetime
import numpy as np
import argparse
import sys

import Configuration as conf
import ConfigMe

import io
import os

# Set to True to also write the incoming frames to ./data/output.<ts>.avi
savevideo = False

# Resolve the camera address: config file by default, '-f' forces a fixed
# address, otherwise the first CLI argument is taken as the IP.
if (len(sys.argv)<2):
    # Load the configuration file
    ip = ConfigMe.readconfig("config.ini")
    port = conf.videoport
elif sys.argv[1] == '-f':
    print ("Forcing IP Address")
    ip = '192.168.0.110'
    port = 10000
else:
    ip = sys.argv[1]
    print ("Using IP:"+ip)
    port = 10000

#cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture('/Users/rramele/Documents/AppleStore.Subiendo.I.mov')
#cap = cv2.VideoCapture('tcp://192.168.1.1:5555')
#cap = cv2.VideoCapture('tcp://192.168.0.3/cgi-bin/fwstream.cgi?FwModId=0&PortId=1&PauseTime=0&FwCgiVer=0x0001')
#cap = cv2.VideoCapture('rtsp://192.168.0.3/cam0_0')
#cap = cv2.VideoCapture('tcp://192.168.0.110:10000')
#cap = cv2.VideoCapture('tcp://10.17.48.177:10000')

cap = cv2.VideoCapture('tcp://'+str(ip)+':'+str(port))

#cap = cv2.VideoCapture('udp://localhost:10000')

if (savevideo):
    # Match the writer's frame size to the capture's reported resolution.
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
    out = cv2.VideoWriter('./data/output.'+st+'.avi',fourcc, 24.0, (int(w),int(h)))

print ("Connecting..")

def click_and_crop(event, x, y, flags, param):
    # Mouse callback: print the clicked pixel coordinates.
    # NOTE(review): despite the comment below, this reacts to the RIGHT
    # button (EVENT_RBUTTONDOWN) and does not crop -- confirm intent.
    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being
    # performed
    if event == cv2.EVENT_RBUTTONDOWN:
        print (x, y)

cv2.namedWindow("ShinkeyBot Eye")
cv2.setMouseCallback("ShinkeyBot Eye", click_and_crop)

def hough(frame):
    # Detect straight lines on a Canny edge map and draw them in green.
    # NOTE(review): HoughLinesP returns an (N,1,4) array, so iterating
    # lines[0] draws only the first detected line -- confirm whether all
    # lines were intended. Only referenced from commented-out code above.
    edges = cv2.Canny(frame,50,150,apertureSize = 3)
    minLineLength = 100
    maxLineGap = 10
    lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength,maxLineGap)
    for x1,y1,x2,y2 in lines[0]:
        cv2.line(frame,(x1,y1),(x2,y2),(0,255,0),2)
    return frame

# Main display loop (bounded iteration count rather than an infinite loop).
for i in range(1,80000):
    # Capture frame-by-frame
    ret, frame = cap.read()

    #frame = cv2.flip(frame,0)
    #frame = cv2.flip(frame,1)

    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #cv2.imwrite('01.png', gray)

    #Using AKAZE descriptors.
    #detector = cv2.AKAZE_create()
    #(kps, descs) = detector.detectAndCompute(gray, None)
    #print("keypoints: {}, descriptors: {}".format(len(kps), descs.shape))
    # draw the keypoints and show the output image
    #cv2.drawKeypoints(frame, kps, frame, (0, 255, 0))

    #edges = cv2.Canny(frame,100,200)
    #edges = cv2.Canny(frame,50,150,apertureSize = 3)

    # Convert BGR to HSV
    # Convert BGR to HSV
    ##hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV
    ##lower_blue = np.array([160,50,50])
    ##upper_blue = np.array([185,255,255])
    # Threshold the HSV image to get only blue colors
    ##mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # Bitwise-AND mask and original image
    ##res = cv2.bitwise_and(frame,frame, mask= mask)
    ##res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    ##hough(res)

    #print frame[474,37]

    cv2.imshow("ShinkeyBot Eye", frame)
    if (savevideo):
        out.write(frame)
    # 'q' quits the viewer.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

print ('Done.')

#When everything done, release the capture
cap.release()
time.sleep(5)
if (savevideo):
    out.release()
cv2.destroyAllWindows()
{ "content_hash": "5fa2920edc980737214c57c91cb8f8e9", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 112, "avg_line_length": 25, "alnum_prop": 0.686412213740458, "repo_name": "faturita/ShinkeyBot", "id": "018185fce62fef379da74ec286c83da1f9af6106", "size": "3292", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "NeoCortex/IPCamera.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "893244" }, { "name": "C++", "bytes": "104189" }, { "name": "Makefile", "bytes": "19980" }, { "name": "Objective-C", "bytes": "129846" }, { "name": "Objective-C++", "bytes": "354124" }, { "name": "Python", "bytes": "124330" }, { "name": "Shell", "bytes": "6840" } ], "symlink_target": "" }
"""Various hooks to access stuff from the web""" import os, sys, time import urllib def spawnWorkers(num, target, name=None, args=(), kwargs={}, daemon=1, interval=0): """Spawns the given number of workers, by default daemon, and returns a list of them. 'interval' determines the time delay between each launching""" from threading import Thread threads = [] for i in range(num): t = Thread(target=target, name=name, args=args, kwargs=kwargs) t.setDaemon(daemon) t.start() threads.append(t) time.sleep(interval) return threads def dlmany(urls, fnames, nprocs=10, callback=None): """Downloads many images simultaneously. The callback is called with (index, url, fname)""" from urllib import urlretrieve from Queue import Queue assert len(urls) == len(fnames) if not urls: return [] ret = [] q = Queue() outq = Queue() def dlproc(): while 1: u, f = q.get() if not u: break try: os.makedirs(os.path.dirname(f)) except OSError: pass try: fname, junk = urlretrieve(u, f) outq.put((u,fname)) except Exception, e: print >>sys.stderr, 'Exception on %s -> %s: %s' % (u, f, e) outq.put((u,None)) threads = spawnWorkers(nprocs, dlproc, interval=0) for u, f in zip(urls, fnames): q.put((u, f)) i = 0 while len(ret) < len(urls): u, f = outq.get() if callback: callback(i, u, f) ret.append((u, f)) i += 1 return ret class CustomURLopener(urllib.FancyURLopener): """Custom url opener that defines a new user-agent. Needed so that sites don't block us as a crawler.""" version = "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5" def prompt_user_passwd(host, realm): """Custom user-password func for downloading, to make sure that we don't block""" return ('', '') urllib._urlopener = CustomURLopener() def checkAndDeleteImgs(fnames): """Checks the given set of image filenames for validity. If any filename is invalid, deletes it from disk. Returns a list of valid fnames, in the same order as given. 
""" ret = [] todel = [] # try to open each file as an image for fname in fnames: try: Image.open(fname) ret.append(fname) except IOError: todel.append(fname) # remove bad images from disk for fname in todel: try: os.remove(fname) except Exception: pass return ret class GoogleImages(object): """A google images searcher""" def __init__(self,outdir): """Initializes with simple setup""" self.outdir = outdir def _dl(self, q, dir, n_images): """Main internal download function. Given a search term as 'q', downloads images to our outdir. Returns (allret, urls, fnames), where: allret is a list of result dicts from google images urls is a list of thumbnail urls fnames is a list of downloaded image paths Note that the output images are at self.outdir/q/imageid.jpg """ import urllib2 from urllib import quote_plus try: import simplejson as json except ImportError: import json times = [time.time()] allret = [] # get all metadata for start in [0, 8, 16, 24, 32, 40, 48, 56]: # Get userip from text file: f_ip = open('userip.txt','r') myuserip = f_ip.readlines() f_ip.close() # note that we exclude very small image sizes d = dict(userip=myuserip[0].rstrip(), sizes='small|medium|large|xlarge|xxlarge|huge', q=quote_plus(q), start=start) url = 'https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=%(q)s&userip=%(userip)s&rsz=8&start=%(start)d&imgsz=%(sizes)s' % (d) request = urllib2.Request(url, None, {'Referer': 'http://cnet.com/'}) times.append(time.time()) response = urllib2.urlopen(request) times.append(time.time()) results = json.load(response) allret.extend(results['responseData']['results']) times.append(time.time()) allret = allret[0:n_images] # create output dir #dir = os.path.join(self.outdir, str(n_images), q.replace(' ','_')) try: os.makedirs(dir) except OSError: pass times.append(time.time()) # start downloading images urls, fnames = zip(*[(r['url'], os.path.join(dir, '%04d.jpg' % i)) for i,r in enumerate(allret)]) #imgs = dlmany(urls, fnames, nprocs=16, 
callback=None) times.append(time.time()) #print getTimeDiffs(times) return (allret, urls, fnames) def getthumbs(self, term, dirname, n_images): """Downloads all thumbnails for the given term (if needed). Checks for a json file in the appropriate location first. Returns a list of valid image filenames.""" try: import simplejson as json except ImportError: import json dir = os.path.join(self.outdir, dirname) jsonfname = os.path.join(dir, 'index.json') # save the search term to a file try: os.makedirs(dir) except OSError: pass term_fname = os.path.join(dir,'search_term.txt') termfile = open(term_fname,'w') print>>termfile,term termfile.close() try: results = json.load(open(jsonfname)) except Exception: # we don't have valid results, so re-download ret, urls, fnames = self._dl(term,dir,n_images) results = dict(results=ret, thumburls=urls, thumbfnames=fnames) json.dump(results, open(jsonfname, 'w'), indent=2) # at this point, we have results one way or the other return results['thumbfnames'] def testgoog(terms,dirpath): """Tests the google image downloader""" G = GoogleImages(outdir=dirpath) for n_images in [20]: for i,term in enumerate(terms): done = 0 counter = 0 MAX_COUNT = 10 while not done: try: t1 = time.time() ret = G.getthumbs(term,'%04d'%i,n_images) print ret, len(ret), time.time()-t1 done = 1 except Exception, e: counter = counter+1 print 'Caught exception %s, so sleeping for a bit' % (e,) if counter < MAX_COUNT: time.sleep(10) else: done = 1 time.sleep(1) if __name__ == '__main__': testgoog(sys.argv[2:],sys.argv[1])
{ "content_hash": "96987e23b3c19ceab3101bd753b99f42", "timestamp": "", "source": "github", "line_count": 196, "max_line_length": 151, "avg_line_length": 35.816326530612244, "alnum_prop": 0.5605413105413105, "repo_name": "brussell123/3dwikipedia", "id": "fb6f65e56eca7dbb959e9bd082c900cea849886e", "size": "7020", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "code/LIBS/google_image_search/googim.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1229657" }, { "name": "C++", "bytes": "2505" }, { "name": "CSS", "bytes": "28968" }, { "name": "Clean", "bytes": "7372" }, { "name": "Groff", "bytes": "12088" }, { "name": "HTML", "bytes": "133325" }, { "name": "M", "bytes": "735" }, { "name": "Makefile", "bytes": "60621" }, { "name": "Matlab", "bytes": "481410" }, { "name": "Objective-C", "bytes": "29878" }, { "name": "Python", "bytes": "79451" }, { "name": "Shell", "bytes": "2170" }, { "name": "TeX", "bytes": "3888" } ], "symlink_target": "" }
# Generates a two-lane HD-map protobuf from a CSV of center-line points:
# the input path (argv[1]) is smoothed into a shapely LineString, chopped
# into 100 m lane segments, and a second lane is laid one LANE_WIDTH to the
# right; the text-format map is written to argv[2].
import sys
from modules.map.proto import map_pb2
from modules.map.proto import map_lane_pb2
from modules.map.proto import map_road_pb2
import math
from shapely.geometry import LineString, Point

# Lane width in meters; boundaries sit half of this on each side of center.
LANE_WIDTH = 3.3


def convert(p, p2, distance):
    """Given a point p and a look-ahead point p2 defining the heading,
    return ([x, y], [x, y]) offset 'distance' to the left and to the right
    (perpendicular to the p->p2 direction)."""
    delta_y = p2.y - p.y
    delta_x = p2.x - p.x
    # print math.atan2(delta_y, delta_x)
    left_angle = math.atan2(delta_y, delta_x) + math.pi / 2.0
    right_angle = math.atan2(delta_y, delta_x) - math.pi / 2.0
    # print angle
    lp = []
    lp.append(p.x + (math.cos(left_angle) * distance))
    lp.append(p.y + (math.sin(left_angle) * distance))

    rp = []
    rp.append(p.x + (math.cos(right_angle) * distance))
    rp.append(p.y + (math.sin(right_angle) * distance))
    return lp, rp


def shift(p, p2, distance, isleft=True):
    """Translate both p and p2 sideways by 'distance' (left of the p->p2
    heading by default, right when isleft is False) and return them as
    shapely Points."""
    delta_y = p2.y - p.y
    delta_x = p2.x - p.x
    # print math.atan2(delta_y, delta_x)
    angle = 0
    if isleft:
        angle = math.atan2(delta_y, delta_x) + math.pi / 2.0
    else:
        angle = math.atan2(delta_y, delta_x) - math.pi / 2.0
    # print angle
    p1n = []
    p1n.append(p.x + (math.cos(angle) * distance))
    p1n.append(p.y + (math.sin(angle) * distance))

    p2n = []
    p2n.append(p2.x + (math.cos(angle) * distance))
    p2n.append(p2.y + (math.sin(angle) * distance))
    return Point(p1n), Point(p2n)


def create_lane(map, id):
    """Add a new 100 m city-driving lane skeleton to the map proto and
    return (lane, central, left_boundary, right_boundary) so the caller can
    fill in the geometry points. (Note: 'map'/'id'/'type' shadow builtins.)"""
    lane = map.lane.add()
    lane.id.id = str(id)
    lane.type = map_lane_pb2.Lane.CITY_DRIVING
    lane.direction = map_lane_pb2.Lane.FORWARD
    lane.length = 100.0
    lane.speed_limit = 20.0
    lane.turn = map_lane_pb2.Lane.NO_TURN
    #lane.predecessor_id.add().id = str(id - 1)
    #lane.successor_id.add().id = str(id + 1)
    left_boundary = lane.left_boundary.curve.segment.add()
    right_boundary = lane.right_boundary.curve.segment.add()
    central = lane.central_curve.segment.add()
    central.length = 100.0
    type = lane.left_boundary.boundary_type.add()
    type.s = 0
    type.types.append(map_lane_pb2.LaneBoundaryType.DOTTED_YELLOW)
    lane.right_boundary.length = 100.0

    type = lane.right_boundary.boundary_type.add()
    type.s = 0
    type.types.append(map_lane_pb2.LaneBoundaryType.DOTTED_YELLOW)
    lane.left_boundary.length = 100.0

    return lane, central, left_boundary, right_boundary


# ---- read the center-line points (CSV: x,y per line) ----
fpath = sys.argv[1]
f = open(fpath, 'r')

points = []
for line in f:
    line = line.replace("\n", '')
    data = line.split(',')
    x = float(data[0])
    y = float(data[1])
    points.append((x, y))

path = LineString(points)
length = int(path.length)

fmap = open(sys.argv[2], 'w')

id = 0
map = map_pb2.Map()
road = map.road.add()
road.id.id = "1"
section = road.section.add()
section.id.id = "2"
lane = None
lane_n1 = None
# Walk the path one meter at a time; every 100 m start a new pair of lanes
# (ids: id for the left lane, id + 1000 for its right neighbor).
for i in range(length - 1):
    if i % 100 == 0:
        id += 1
        # Link the previous segment pair to the new one.
        if lane is not None:
            lane.successor_id.add().id = str(id)
        if lane_n1 is not None:
            lane_n1.successor_id.add().id = str(id + 1000)

        lane, central, left_boundary, right_boundary = create_lane(map, id)
        lane_n1, central_n1, left_boundary_n1, right_boundary_n1 = create_lane(
            map, id + 1000)
        section.lane_id.add().id = str(id)
        section.lane_id.add().id = str(id + 1000)
        # Road-section outer polygon edges for this segment.
        left_edge = section.boundary.outer_polygon.edge.add()
        left_edge.type = map_road_pb2.BoundaryEdge.LEFT_BOUNDARY
        left_edge_segment = left_edge.curve.segment.add()
        right_edge = section.boundary.outer_polygon.edge.add()
        right_edge.type = map_road_pb2.BoundaryEdge.RIGHT_BOUNDARY
        right_edge_segment = right_edge.curve.segment.add()

        lane.right_neighbor_forward_lane_id.add().id = str(id + 1000)
        lane_n1.left_neighbor_forward_lane_id.add().id = str(id)
        if i > 0:
            lane.predecessor_id.add().id = str(id - 1)
            lane_n1.predecessor_id.add().id = str(id - 1 + 1000)
            # Seed the new segment with the point at i-1 (s = 0) so
            # consecutive segments share a boundary point.
            right_edge_point = right_edge_segment.line_segment.point.add()
            left_edge_point = left_edge_segment.line_segment.point.add()
            left_bound_point = left_boundary.line_segment.point.add()
            right_bound_point = right_boundary.line_segment.point.add()
            central_point = central.line_segment.point.add()
            p = path.interpolate(i - 1)
            p2 = path.interpolate(i - 1 + 0.5)
            distance = LANE_WIDTH / 2.0
            lp, rp = convert(p, p2, distance)
            left_bound_point.y = lp[1]
            left_bound_point.x = lp[0]
            right_bound_point.y = rp[1]
            right_bound_point.x = rp[0]
            central_point.x = p.x
            central_point.y = p.y
            left_edge_point.y = lp[1]
            left_edge_point.x = lp[0]
            left_sample = lane.left_sample.add()
            left_sample.s = 0
            left_sample.width = LANE_WIDTH / 2.0
            right_sample = lane.right_sample.add()
            right_sample.s = 0
            right_sample.width = LANE_WIDTH / 2.0

            ##### same seed point for the right-hand neighbor lane, shifted
            ##### one LANE_WIDTH to the right of the center line
            left_bound_point = left_boundary_n1.line_segment.point.add()
            right_bound_point = right_boundary_n1.line_segment.point.add()
            central_point = central_n1.line_segment.point.add()
            p = path.interpolate(i - 1)
            p2 = path.interpolate(i - 1 + 0.5)
            distance = LANE_WIDTH
            p, p2 = shift(p, p2, distance, False)
            distance = LANE_WIDTH / 2.0
            lp, rp = convert(p, p2, distance)
            left_bound_point.y = lp[1]
            left_bound_point.x = lp[0]
            right_bound_point.y = rp[1]
            right_bound_point.x = rp[0]
            central_point.x = p.x
            central_point.y = p.y
            right_edge_point.y = rp[1]
            right_edge_point.x = rp[0]
            left_sample = lane_n1.left_sample.add()
            left_sample.s = 0
            left_sample.width = LANE_WIDTH / 2.0
            right_sample = lane_n1.right_sample.add()
            right_sample.s = 0
            right_sample.width = LANE_WIDTH / 2.0

    # Per-meter point for the left lane: center plus boundaries half a
    # lane width to each side; left boundary doubles as the road edge.
    right_edge_point = right_edge_segment.line_segment.point.add()
    left_edge_point = left_edge_segment.line_segment.point.add()
    left_bound_point = left_boundary.line_segment.point.add()
    right_bound_point = right_boundary.line_segment.point.add()
    central_point = central.line_segment.point.add()
    p = path.interpolate(i)
    p2 = path.interpolate(i + 0.5)
    distance = LANE_WIDTH / 2.0
    lp, rp = convert(p, p2, distance)
    central_point.x = p.x
    central_point.y = p.y
    left_bound_point.y = lp[1]
    left_bound_point.x = lp[0]
    right_bound_point.y = rp[1]
    right_bound_point.x = rp[0]
    left_edge_point.y = lp[1]
    left_edge_point.x = lp[0]
    left_sample = lane.left_sample.add()
    left_sample.s = i % 100 + 1
    left_sample.width = LANE_WIDTH / 2.0
    right_sample = lane.right_sample.add()
    right_sample.s = i % 100 + 1
    right_sample.width = LANE_WIDTH / 2.0

    ################ per-meter point for the right-hand neighbor lane;
    ################ its right boundary doubles as the road's right edge
    left_bound_point = left_boundary_n1.line_segment.point.add()
    right_bound_point = right_boundary_n1.line_segment.point.add()
    central_point = central_n1.line_segment.point.add()
    p = path.interpolate(i)
    p2 = path.interpolate(i + 0.5)
    distance = LANE_WIDTH
    p, p2 = shift(p, p2, distance, False)
    distance = LANE_WIDTH / 2.0
    lp, rp = convert(p, p2, distance)
    central_point.x = p.x
    central_point.y = p.y
    left_bound_point.y = lp[1]
    left_bound_point.x = lp[0]
    right_bound_point.y = rp[1]
    right_bound_point.x = rp[0]
    right_edge_point.y = rp[1]
    right_edge_point.x = rp[0]
    left_sample = lane_n1.left_sample.add()
    left_sample.s = i % 100 + 1
    left_sample.width = LANE_WIDTH / 2.0
    right_sample = lane_n1.right_sample.add()
    right_sample.s = i % 100 + 1
    right_sample.width = LANE_WIDTH / 2.0

# Serialize the whole map in protobuf text format.
fmap.write(str(map))
fmap.close()
{ "content_hash": "3c8c2b884e8baa1cfd395f3dd18032cf", "timestamp": "", "source": "github", "line_count": 250, "max_line_length": 79, "avg_line_length": 31.52, "alnum_prop": 0.590482233502538, "repo_name": "xiaoxq/apollo", "id": "7ce829616e5a710da8946fbe038ae23c78bd56d5", "size": "8665", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "modules/tools/map_gen/map_gen_two_lanes_right_ext.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "1922" }, { "name": "Batchfile", "bytes": "791" }, { "name": "C", "bytes": "17934" }, { "name": "C++", "bytes": "17985748" }, { "name": "CMake", "bytes": "3600" }, { "name": "CSS", "bytes": "44631" }, { "name": "Cuda", "bytes": "96408" }, { "name": "Dockerfile", "bytes": "12364" }, { "name": "GLSL", "bytes": "7000" }, { "name": "HTML", "bytes": "21068" }, { "name": "JavaScript", "bytes": "404879" }, { "name": "Makefile", "bytes": "6626" }, { "name": "Python", "bytes": "1274579" }, { "name": "Shell", "bytes": "307968" }, { "name": "Smarty", "bytes": "33150" }, { "name": "Starlark", "bytes": "795380" } ], "symlink_target": "" }
""" raven.contrib.django.urls ~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import try: from django.conf.urls import patterns, url except ImportError: # for Django version less then 1.4 from django.conf.urls.defaults import patterns, url # NOQA urlpatterns = patterns('', url(r'^api/(?:(?P<project_id>[\w_-]+)/)?store/$', 'raven.contrib.django.views.report', name='raven-report'), url(r'^report/', 'raven.contrib.django.views.report'), )
{ "content_hash": "499ce923ef6c8c71c86502cc641caca9", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 112, "avg_line_length": 31.526315789473685, "alnum_prop": 0.66110183639399, "repo_name": "karolaug/python-raven", "id": "f9c17b38ab72fce5887f3f61fad00bbad05725e2", "size": "599", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "raven/contrib/django/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "185685" } ], "symlink_target": "" }
import json
from collections import defaultdict

import util


def get_fb_stats(freebase_data_file):
    """Print basic statistics for a Freebase facts file.

    Each line is expected to look like ``("e1", "r1", "r2", "e2")``; the two
    relation parts are joined with ``_`` to form a single relation name.

    Prints the total number of facts and the counts of unique entities and
    unique relations.
    """
    fact_counter = 0
    relation_set = set()
    entity_set = set()
    with open(freebase_data_file) as fb:
        for line in fb:
            line = line.strip()
            # Drop the surrounding parentheses, then split the quoted fields.
            line = line[1:-1]
            e1, r1, r2, e2 = [a.strip('"') for a in
                              [x.strip() for x in line.split(',')]]
            r = r1 + '_' + r2
            fact_counter += 1
            relation_set.add(r)
            entity_set.add(e1)
            entity_set.add(e2)
    print("Total num of facts {}".format(fact_counter))
    print("Num unique entities {}".format(len(entity_set)))
    print("Num unique relations {}".format(len(relation_set)))


def get_questions_stats(train_data_file, dev_data_file):
    """Print statistics about the train/dev question JSON files.

    1. Distribution of the number of ``_blank_`` tokens per train question.
    2. Number of word types (question words plus answers) in the train set.
    3. Overlap between dev-set answer strings and the train vocabulary.
    """
    print('1. Getting the number of blanks')
    blank_str = '_blank_'
    num_blanks_map = defaultdict(int)
    word_freq_train = defaultdict(int)
    with open(train_data_file) as train_file:
        # util.verboserate wraps the iterator with progress reporting.
        for line in util.verboserate(train_file):
            line = line.strip()
            q_json = json.loads(line)
            q = q_json['sentence']
            num_blanks_map[q.count(blank_str)] += 1
            for word in q.split(' '):
                word_freq_train[word.strip()] += 1
            for a in q_json['answerSubset']:
                # BUG FIX: this previously did
                #   word_freq_train[a] = word_freq_train[word] + 1
                # which copied the frequency of the *last word of the
                # sentence* (stale loop variable) instead of counting the
                # answer occurrence itself.
                word_freq_train[a] += 1
    print(num_blanks_map)
    print('2. Number of word types in the train set {}'.format(
        len(word_freq_train)))
    print('3. Checking overlap with the dev answers')
    dev_answers_present = set()
    dev_answers_oov = set()
    dev_answers = set()
    with open(dev_data_file) as dev_file:
        for line in dev_file:
            line = line.strip()
            dev_json = json.loads(line)
            for a in dev_json['answerSubset']:
                if a in word_freq_train:
                    dev_answers_present.add(a)
                else:
                    dev_answers_oov.add(a)
                dev_answers.add(a)
    print('Number of unique dev answer strings {}'.format(len(dev_answers)))
    print('Number of oov answer strings in dev set {}'.format(
        len(dev_answers_oov)))
    print('Number of dev answer strings which have atleast 1 occurrences '
          'in train set {}'.format(len(dev_answers_present)))


# Hard-coded dataset locations for the SPADES experiments.
# NOTE(review): these run at import time; consider a __main__ guard if this
# module is ever imported rather than executed.
freebase_data_file = "/home/rajarshi/research/graph-parser/data/spades/freebase.spades.txt"
train_data_file = "/home/rajarshi/research/graph-parser/data/spades/train.json"
dev_data_file = "/home/rajarshi/research/graph-parser/data/spades/dev.json"
# get_fb_stats(freebase_data_file)
get_questions_stats(train_data_file, dev_data_file)
{ "content_hash": "b136400d7f81acca602258a7de8ba91f", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 97, "avg_line_length": 35.31645569620253, "alnum_prop": 0.5831541218637993, "repo_name": "rajarshd/TextKBQA", "id": "5c289092fca154a970f4baf66643a08b1b3ab8d3", "size": "2790", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "code/get_stats.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "106077" }, { "name": "Shell", "bytes": "6571" } ], "symlink_target": "" }
'''
Lightweight wrapper class for preprocessed image data
'''

import copy
import numpy as np

from desispec.maskbits import ccdmask
from desispec import util


class Image(object):
    """2D preprocessed CCD image with inverse variance, mask and metadata."""

    def __init__(self, pix, ivar, mask=None, readnoise=0.0, camera='unknown',
                 meta=None):
        """
        Create Image object

        Args:
            pix : 2D numpy.ndarray of image pixels

        Optional:
            ivar : inverse variance of pix, same shape as pix
            mask : 0 is good, non-0 is bad; default is (ivar==0)
            readnoise : CCD readout noise in electrons/pixel (float)
            camera : e.g. 'b0', 'r1', 'z9'
            meta : dict-like metadata key/values, e.g. from FITS header

        Raises:
            ValueError : if pix is not 2D or if ivar/mask shapes mismatch
        """
        if pix.ndim != 2:
            raise ValueError('pix must be 2D, not {}D'.format(pix.ndim))
        if pix.shape != ivar.shape:
            raise ValueError('pix.shape{} != ivar.shape{}'.format(pix.shape, ivar.shape))
        if (mask is not None) and (pix.shape != mask.shape):
            raise ValueError('pix.shape{} != mask.shape{}'.format(pix.shape, mask.shape))

        self.pix = pix
        self.ivar = ivar
        self.meta = meta

        if mask is not None:
            self.mask = util.mask32(mask)
        else:
            self.mask = np.zeros(self.ivar.shape, dtype=np.uint32)
            #- Pixels with zero inverse variance carry no information: flag BAD
            self.mask[self.ivar == 0] |= ccdmask.BAD

        #- Optional parameters
        self.readnoise = readnoise
        self.camera = camera

    #- Allow image slicing
    def __getitem__(self, xyslice):
        """
        Return a new Image for the requested slice.

        ``xyslice`` must be a slice (rows only, image[a:b]) or a tuple of two
        slices (rows, columns: image[a:b, c:d]). NAXIS1/NAXIS2 metadata are
        updated to the sliced dimensions when the slice bounds are explicit.
        """
        #- Slices must be a slice object, or a tuple of (slice, slice)
        if isinstance(xyslice, slice):
            pass  #- valid slice
        elif isinstance(xyslice, tuple):
            #- tuples of (slice, slice) are valid
            if len(xyslice) > 2:
                raise ValueError('Must slice in 1D or 2D, not {}D'.format(len(xyslice)))
            else:
                if not isinstance(xyslice[0], slice) or \
                   not isinstance(xyslice[1], slice):
                    raise ValueError('Invalid slice for Image objects')
        else:
            raise ValueError('Invalid slice for Image objects')

        pix = self.pix[xyslice]
        ivar = self.ivar[xyslice]
        mask = self.mask[xyslice]
        meta = copy.copy(self.meta)
        if np.isscalar(self.readnoise):
            readnoise = self.readnoise
        else:
            #- Per-pixel readnoise image: slice it along with the data
            readnoise = self.readnoise[xyslice]

        #- NAXIS1 = x, NAXIS2 = y; python slices[y,x] = [NAXIS2, NAXIS1]
        if meta is not None and (('NAXIS1' in meta) or ('NAXIS2' in meta)):
            #- image[a:b] instead of image[a:b, c:d]
            if isinstance(xyslice, slice):
                #- BUG FIX: open-ended slices (image[:b] or image[a:]) used
                #- to raise TypeError here; only update NAXIS2 when both
                #- bounds are explicit, matching the 2D branch below.
                if (xyslice.start is not None) and (xyslice.stop is not None):
                    meta['NAXIS2'] = xyslice.stop - xyslice.start
            else:
                slicey, slicex = xyslice
                #- slice ranges could be None if using : instead of a:b;
                #- leave the original NAXIS values in place in that case
                if (slicex.start is not None) and (slicex.stop is not None):
                    meta['NAXIS1'] = slicex.stop - slicex.start
                if (slicey.start is not None) and (slicey.stop is not None):
                    meta['NAXIS2'] = slicey.stop - slicey.start

        return Image(pix, ivar, mask,
                     readnoise=readnoise, camera=self.camera, meta=meta)
{ "content_hash": "4321ef68a5379a53f63904261fd02a32", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 101, "avg_line_length": 38.20224719101124, "alnum_prop": 0.53, "repo_name": "timahutchinson/desispec", "id": "182571f09f83a34686cd730c3fd47dac8892cc9e", "size": "3400", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "py/desispec/image.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "848450" }, { "name": "Shell", "bytes": "11981" } ], "symlink_target": "" }
""" For the ``future`` package. Adds this import line: from past.builtins import str as oldstr at the top and wraps any unadorned string literals 'abc' or explicit byte-string literals b'abc' in oldstr() calls so the code has the same behaviour on Py3 as on Py2.6/2.7. """ from __future__ import unicode_literals import re from lib2to3 import fixer_base from lib2to3.pgen2 import token from lib2to3.fixer_util import syms from libfuturize.fixer_util import (future_import, touch_import_top, wrap_in_fn_call) _literal_re = re.compile(r"[^uUrR]?[\'\"]") class FixOldstrWrap(fixer_base.BaseFix): BM_compatible = True PATTERN = "STRING" def transform(self, node, results): if node.type == token.STRING: touch_import_top(u'past.types', u'oldstr', node) if _literal_re.match(node.value): new = node.clone() # Strip any leading space or comments: # TODO: check: do we really want to do this? new.prefix = u'' new.value = u'b' + new.value wrapped = wrap_in_fn_call("oldstr", [new], prefix=node.prefix) return wrapped def transform(self, node, results): expr1, expr2 = results[0].clone(), results[1].clone() # Strip any leading space for the first number: expr1.prefix = u'' return wrap_in_fn_call("old_div", expr1, expr2, prefix=node.prefix) class FixDivisionSafe(fixer_base.BaseFix): # BM_compatible = True run_order = 4 # this seems to be ignored? _accept_type = token.SLASH PATTERN = """ term<(not('/') any)+ '/' ((not('/') any))> """ def match(self, node): u""" Since the tree needs to be fixed once and only once if and only if it matches, then we can start discarding matches after we make the first. 
""" if (node.type == self.syms.term and len(node.children) == 3 and match_division(node.children[1])): expr1, expr2 = node.children[0], node.children[2] return expr1, expr2 else: return False def transform(self, node, results): future_import(u"division", node) touch_import_top(u'past.utils', u'old_div', node) expr1, expr2 = results[0].clone(), results[1].clone() # Strip any leading space for the first number: expr1.prefix = u'' return wrap_in_fn_call("old_div", expr1, expr2, prefix=node.prefix)
{ "content_hash": "86bc740b88b9e51782c696121ea3d520", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 80, "avg_line_length": 32.76923076923077, "alnum_prop": 0.5938967136150235, "repo_name": "rfguri/vimfiles", "id": "575292d8c0b1bc7e320ad2840e48404c8170579e", "size": "2556", "binary": false, "copies": "17", "ref": "refs/heads/master", "path": "bundle/ycm/third_party/ycmd/third_party/python-future/src/libfuturize/fixes/fix_oldstr_wrap.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import numpy as np

from bokeh.models import BoxSelectTool
from bokeh.plotting import figure, show, output_file, gridplot

# Demo data: two full periods of a sine curve.
xs = np.linspace(0, 4*np.pi, 100)
ys = np.sin(xs)

TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"

# Every subplot shares the same size and toolbar configuration.
common_opts = dict(tools=TOOLS, plot_width=350, plot_height=350)

# Top-left: box selection applied when the mouse button is released.
mouseup_fig = figure(title="selection on mouseup", **common_opts)
mouseup_fig.circle(xs, ys, color="navy", size=6, alpha=0.6)

# Top-right: same tool, but reselect continuously while dragging.
mousemove_fig = figure(title="selection on mousemove", **common_opts)
mousemove_fig.square(xs, ys, color="olive", size=6, alpha=0.6)
box_select = mousemove_fig.select_one(BoxSelectTool)
box_select.select_every_mousemove = True

# Bottom-left: rely on bokeh's default styling of non-selected points.
default_fig = figure(title="default highlight", **common_opts)
default_fig.circle(xs, ys, color="firebrick", alpha=0.5, size=6)

# Bottom-right: customize how non-selected points are rendered.
custom_fig = figure(title="custom highlight", **common_opts)
custom_fig.square(xs, ys, color="navy", size=6, alpha=0.6,
                  nonselection_color="orange", nonselection_alpha=0.6)

output_file("scatter_selection.html", title="scatter_selection.py example")

layout = gridplot([[mouseup_fig, mousemove_fig],
                   [default_fig, custom_fig]])
show(layout)  # open a browser
{ "content_hash": "b3a3a5638255e0f6d981a4e5e5cd3592", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 75, "avg_line_length": 32.48275862068966, "alnum_prop": 0.7016985138004246, "repo_name": "justacec/bokeh", "id": "43f1a915b7c33bfb1ba576f6a5dffdd30f48dda6", "size": "942", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/plotting/file/scatter_selection.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "5455" }, { "name": "CSS", "bytes": "410003" }, { "name": "CoffeeScript", "bytes": "734115" }, { "name": "HTML", "bytes": "27676" }, { "name": "JavaScript", "bytes": "8811" }, { "name": "Jupyter Notebook", "bytes": "3981" }, { "name": "Makefile", "bytes": "5842" }, { "name": "Python", "bytes": "1776018" }, { "name": "Shell", "bytes": "17605" } ], "symlink_target": "" }
"""Iterative linear solvers over pytrees.""" from typing import Callable, TypeVar import jax # Any nested structure of NDArrays, viewed as a vector space (with some extra # structure). VectorPytree = TypeVar("VectorPytree") def richardson_solve(matvec, b, iterations): """Solve a linear system using the Richardson iteration method. This function approximates the solution to Ax = b by iterating the equations x_0 = b x_{k+1} = x_k + b - A x_k = b + (I - A) x_k until convergence or a given maximum number of iterations. This method will converge if ||I - A|| < 1 (for any induced matrix norm). The Richardson method is especially appropriate for systems involving the transition matrix of an absorbing Markov chain. In particular, solving for the number of visits to each state involves computing (I - P)^{-1} x, where A = I - P. In this case, I - A = P, so each estimate x_k is exactly the expected number of visits to each state after k timesteps. If the provided system has ones along the diagonal, note that this method is equivalent to the Jacobi method. (Numerical stability note: If matvec is already of the form (I-P), then (I - A) is (I - (I - P)) which is equivalent to just P. This doesn't cause any significant numerical stability issues though, since the only time this would reduce output precision is when x is large and P(x) is small. In this case, (I-(I-P))(x) will be low precision after one step, but then the new estimate of x will be small so another iteration will restore the missing precision.) Args: matvec: Linear function from (pytrees of) input vectors to (pytrees of) output vectors of the same shape, representing the matrix to solve. b: Dependent variable for the solve, as a (pytree of) vector(s). iterations: Number of steps to iterate. Returns: Approximate solution to the linear system Ax = b, with gradients defined via implicit differentiation (applying Richardson iteration in reverse). 
""" def do_solve(a_fn, b): """Helper function to iterate the system.""" # Iterate x_k = b + (I-A) x_{k-1} def fixedpt_fn(_, x): return jax.tree_map(lambda bi, xi, ai: bi + xi - ai, b, x, a_fn(x)) return jax.lax.fori_loop(0, iterations, fixedpt_fn, b) # Use custom_linear_solve to get correct derivatives return jax.lax.custom_linear_solve( matvec, b, solve=do_solve, transpose_solve=do_solve)
{ "content_hash": "0b818348dce201da90bd677a8650b423", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 78, "avg_line_length": 39.15873015873016, "alnum_prop": 0.7020672882042968, "repo_name": "google-research/google-research", "id": "45182cf00e378dab00b3d2fa196804b3bc54326d", "size": "3075", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gfsa/linear_solvers.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "9817" }, { "name": "C++", "bytes": "4166670" }, { "name": "CMake", "bytes": "6412" }, { "name": "CSS", "bytes": "27092" }, { "name": "Cuda", "bytes": "1431" }, { "name": "Dockerfile", "bytes": "7145" }, { "name": "Gnuplot", "bytes": "11125" }, { "name": "HTML", "bytes": "77599" }, { "name": "ImageJ Macro", "bytes": "50488" }, { "name": "Java", "bytes": "487585" }, { "name": "JavaScript", "bytes": "896512" }, { "name": "Julia", "bytes": "67986" }, { "name": "Jupyter Notebook", "bytes": "71290299" }, { "name": "Lua", "bytes": "29905" }, { "name": "MATLAB", "bytes": "103813" }, { "name": "Makefile", "bytes": "5636" }, { "name": "NASL", "bytes": "63883" }, { "name": "Perl", "bytes": "8590" }, { "name": "Python", "bytes": "53790200" }, { "name": "R", "bytes": "101058" }, { "name": "Roff", "bytes": "1208" }, { "name": "Rust", "bytes": "2389" }, { "name": "Shell", "bytes": "730444" }, { "name": "Smarty", "bytes": "5966" }, { "name": "Starlark", "bytes": "245038" } ], "symlink_target": "" }
# Approximation of pi shared by all helpers in this module.
pi = 3.14159


def area(radius):
    """Return the area of a circle with the given radius."""
    return pi * radius ** 2


def circumference(radius):
    """Return the circumference of a circle with the given radius."""
    return 2 * pi * radius
{ "content_hash": "a23bd1538e4f2a48dd4490dc4e3c94e7", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 26, "avg_line_length": 13.75, "alnum_prop": 0.6636363636363637, "repo_name": "devendermishrajio/MITx-Foundations-of-Computer-Science", "id": "20ecf6a42a9af48d76d092593d7a7b92409638ad", "size": "149", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "6.00.1x.Introduction-to-Computer-Science-and-Programming-Using-Python/Lecture Code/lectureCode_circle.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "18484" } ], "symlink_target": "" }
from itertools import chain, combinations
import numbers
import warnings

import numpy as np
from scipy import sparse

from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
                                      inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
                                 incr_mean_variance_axis, min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES


# Py2/Py3 compatibility shims: use the lazy (iterator) variants everywhere.
zip = six.moves.zip
map = six.moves.map
range = six.moves.range

__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
]

# Shared warning text for the 0.17 deprecation of 1d array inputs.
DEPRECATION_MSG_1D = (
    "Passing 1d arrays as data is deprecated in 0.17 and will "
    "raise ValueError in 0.19. Reshape your data either using "
    "X.reshape(-1, 1) if your data has a single feature or "
    "X.reshape(1, -1) if it contains a single sample."
)


def _handle_zeros_in_scale(scale, copy=True):
    ''' Makes sure that whenever scale is zero, we handle it correctly.

    This happens in most scalers when we have constant features.

    Zero entries in *scale* are replaced by 1.0 so that dividing by the
    returned scale leaves constant features unchanged instead of producing
    NaN/inf.'''

    # if we are fitting on 1D arrays, scale might be a scalar
    if np.isscalar(scale):
        if scale == .0:
            scale = 1.
        return scale
    elif isinstance(scale, np.ndarray):
        if copy:
            # New array to avoid side-effects
            scale = scale.copy()
        scale[scale == 0.0] = 1.0
        return scale


def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis

    Center to the mean and component wise scale to unit variance.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters ---------- X : {array-like, sparse matrix} The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSC matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using the ``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.mean(X, axis) if with_std: scale_ = np.std(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # substract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. 
The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. data_min_ : ndarray, shape (n_features,) Per feature minimum seen in the data .. versionadded:: 0.17 *data_min_* instead of deprecated *data_min*. data_max_ : ndarray, shape (n_features,) Per feature maximum seen in the data .. versionadded:: 0.17 *data_max_* instead of deprecated *data_max*. data_range_ : ndarray, shape (n_features,) Per feature range ``(data_max_ - data_min_)`` seen in the data .. versionadded:: 0.17 *data_range_* instead of deprecated *data_range*. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy @property @deprecated("Attribute data_range will be removed in " "0.19. Use ``data_range_`` instead") def data_range(self): return self.data_range_ @property @deprecated("Attribute data_min will be removed in " "0.19. 
Use ``data_min_`` instead") def data_min(self): return self.data_min_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if sparse.issparse(X): raise TypeError("MinMaxScaler does no support sparse input. 
" "You may consider to use MaxAbsScaler instead.") X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) data_min = np.min(X, axis=0) data_max = np.max(X, axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next steps else: data_min = np.minimum(self.data_min_, data_min) data_max = np.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = ((feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(data_range)) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. It cannot be sparse. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. 
The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). """ # To allow retro-compatibility, we handle here the case of 1D-input # From 0.17, 1D-input are deprecated in scaler objects # Although, we want to allow the users to keep calling this function # with 1D-input. # Cast input to array, as we need to check ndim. Prior to 0.17, that was # done inside the scaler object fit_transform. # If copy is required, it will be done inside the scaler object. X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. 
Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. This scaler can also be applied to sparse CSR or CSC matrices by passing `with_mean=False` to avoid breaking the sparsity structure of the data. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* is recommended instead of deprecated *std_*. mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. var_ : array of floats with shape [n_features] The variance for each feature in the training set. 
Used to compute `scale_` n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- :func:`sklearn.preprocessing.scale` to perform centering and scaling without using the ``Transformer`` object oriented API :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. """ def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy @property @deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead") def std_(self): return self.scale_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_ def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." 
The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.with_std: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_, self.var_ = mean_variance_axis(X, axis=0) self.n_samples_seen_ = X.shape[0] # Next passes else: self.mean_, self.var_, self.n_samples_seen_ = \ incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_) else: self.mean_ = None self.var_ = None else: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_ = .0 self.n_samples_seen_ = 0 if self.with_std: self.var_ = .0 else: self.var_ = None self.mean_, self.var_, self.n_samples_seen_ = \ _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_) if self.with_std: self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_)) else: self.scale_ = None return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. 
        """
        check_is_fitted(self, 'scale_')

        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy,
                        ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)

        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.scale_ is not None:
                inplace_column_scale(X, 1 / self.scale_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.scale_
        return X

    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'scale_')

        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if not sparse.isspmatrix_csr(X):
                # Conversion already produced a fresh matrix; no copy needed.
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.scale_ is not None:
                inplace_column_scale(X, self.scale_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            if self.with_std:
                X *= self.scale_
            if self.with_mean:
                X += self.mean_
        return X


class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    This estimator scales and translates each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.

    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.

    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    """

    def __init__(self, copy=True):
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.max_abs_

    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting, then delegate to partial_fit.
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y: Passthrough for ``Pipeline`` compatibility.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)

        if sparse.issparse(X):
            # For sparse input, derive |max| from per-column min and max.
            mins, maxs = min_max_axis(X, axis=0)
            max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            max_abs = np.abs(X).max(axis=0)

        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next passes: merge with the running maximum.
        else:
            max_abs = np.maximum(self.max_abs_, max_abs)
            self.n_samples_seen_ += X.shape[0]

        self.max_abs_ = max_abs
        # Guard against division by zero for all-zero features.
        self.scale_ = _handle_zeros_in_scale(max_abs)
        return self

    def transform(self, X, y=None):
        """Scale the data

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)

        if sparse.issparse(X):
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)

        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X


def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    This estimator scales each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0.

    This scaler can also be applied to sparse CSR or CSC matrices.

    Parameters
    ----------
    axis : int (0 by default)
        axis used to scale along.
        If 0, independently scale each feature, otherwise (if 1) scale
        each sample.

    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    """
    # To allow retro-compatibility, we handle here the case of 1D-input
    # From 0.17, 1D-input are deprecated in scaler objects
    # Although, we want to allow the users to keep calling this function
    # with 1D-input.

    # Cast input to array, as we need to check ndim. Prior to 0.17, that was
    # done inside the scaler object fit_transform.
    # If copy is required, it will be done inside the scaler object.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES)
    original_ndim = X.ndim

    if original_ndim == 1:
        # Promote a 1D vector to a single-column 2D array for the scaler.
        X = X.reshape(X.shape[0], 1)

    s = MaxAbsScaler(copy=copy)
    if axis == 0:
        X = s.fit_transform(X)
    else:
        # Scale samples instead of features by transposing around the fit.
        X = s.fit_transform(X.T).T

    if original_ndim == 1:
        X = X.ravel()

    return X


class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.

    This Scaler removes the median and scales the data according to
    the Interquartile Range (IQR). The IQR is the range between the 1st
    quartile (25th quantile) and the 3rd quartile (75th quantile).

    Centering and scaling happen independently on each feature (or each
    sample, depending on the `axis` argument) by computing the relevant
    statistics on the samples in the training set. Median and interquartile
    range are then stored to be used on later data using the `transform`
    method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators. Typically this is done by removing the mean
    and scaling to unit variance. However, outliers can often influence the
    sample mean / variance in a negative way. In such cases, the median and
    the interquartile range often give better results.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. .. versionadded:: 0.17 *scale_* attribute. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using mean and variance. :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. http://en.wikipedia.org/wiki/Median_(statistics) http://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. 
See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. """ if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q = np.percentile(X, (25, 75), axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. 
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)

        if sparse.issparse(X):
            if self.with_scaling:
                inplace_column_scale(X, self.scale_)
        else:
            # Undo transform: re-scale first, then re-center.
            if self.with_scaling:
                X *= self.scale_
            if self.with_centering:
                X += self.center_
        return X


def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
    """Standardize a dataset along any axis

    Center to the median and component wise scale
    according to the interquartile range.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like
        The data to center and scale.

    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.

    with_centering : boolean, True by default
        If True, center the data before scaling.

    with_scaling : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).

    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    See also
    --------
    :class:`sklearn.preprocessing.RobustScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g.
    as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
                     copy=copy)
    if axis == 0:
        return s.fit_transform(X)
    else:
        # Scale samples instead of features by transposing around the fit.
        return s.fit_transform(X.T).T


class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.,   0.,   1.],
           [  1.,   2.,   3.,   4.,   6.,   9.],
           [  1.,   4.,   5.,  16.,  20.,  25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[  1.,   0.,   1.,   0.],
           [  1.,   2.,   3.,   6.],
           [  1.,   4.,   5.,  20.]])

    Attributes
    ----------
    powers_ : array, shape (n_input_features, n_output_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    n_input_features_ : int
        The total number of input features.

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.
Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <example_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(np.bincount(c, minlength=self.n_input_features_) for c in combinations) def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array-like, shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. 
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])

        X = check_array(X, dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape

        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")

        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)

        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # Each output column is the product of one combination of inputs.
        for i, c in enumerate(combinations):
            XP[:, i] = X[:, c].prod(1)

        return XP


def normalize(X, norm='l2', axis=1, copy=True):
    """Scale input vectors individually to unit norm (vector length).

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).

    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g.
    as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)

    # Pick the sparse format whose rows match the normalization axis.
    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    # Work row-wise; transpose so axis=0 becomes row normalization.
    if axis == 0:
        X = X.T

    if sparse.issparse(X):
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        elif norm == 'max':
            _, norms = min_max_axis(X, 1)
            # Expand one norm per row to one norm per stored element.
            norms = norms.repeat(np.diff(X.indptr))
            mask = norms != 0
            X.data[mask] /= norms[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        elif norm == 'max':
            norms = np.max(X, axis=1)
        # Leave all-zero rows untouched instead of dividing by zero.
        norms = _handle_zeros_in_scale(norms, copy=False)
        X /= norms[:, np.newaxis]

    if axis == 0:
        X = X.T

    return X


class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Each sample (i.e. each row of the data matrix) with at least one
    non zero component is rescaled independently of other samples so
    that its norm (l1 or l2) equals one.

    This transformer is able to work both with dense numpy arrays and
    scipy.sparse matrix (use CSR format if you want to avoid the burden of
    a copy / conversion).

    Scaling inputs to unit norms is a common operation for text
    classification or clustering for instance. For instance the dot
    product of two l2-normalized TF-IDF vectors is the cosine similarity
    of the vectors and is the base similarity metric for the Vector
    Space Model commonly used by the Information Retrieval community.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """

    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        # Validation only; this transformer is stateless.
        X = check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.
        """
        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr')
        # Delegate to the functional API with per-sample (axis=1) scaling.
        return normalize(X, norm=self.norm, axis=1, copy=copy)


def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.

    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).
    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        if threshold < 0:
            # A negative threshold would make implicit zeros become ones,
            # which cannot be represented without densifying.
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        cond = X.data > threshold
        not_cond = np.logical_not(cond)
        X.data[cond] = 1
        X.data[not_cond] = 0
        # Drop the explicitly stored zeros created above.
        X.eliminate_zeros()
    else:
        cond = X > threshold
        not_cond = np.logical_not(cond)
        X[cond] = 1
        X[not_cond] = 0
    return X


class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold

    Values greater than the threshold map to 1, while values less than
    or equal to the threshold map to 0. With the default threshold of 0,
    only positive values map to 1.

    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences for instance.

    It can also be used as a pre-processing step for estimators that
    consider boolean random variables (e.g. modelled using the Bernoulli
    distribution in a Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.
    """

    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        # Validation only; this transformer is stateless.
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        copy = copy if copy is not None else self.copy
        # Delegate to the functional API.
        return binarize(X, threshold=self.threshold, copy=copy)


class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalize to have zero mean) the data without explicitly computing phi(x).
    It is equivalent to centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """

    def fit(self, K, y=None):
        """Fit KernelCenterer

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K, dtype=FLOAT_DTYPES)
        n_samples = K.shape[0]
        # Column means of the training kernel and their grand mean; both are
        # reused by transform to center new kernel matrices.
        self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
        self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.

        copy : boolean, optional, default True
            Set to False to perform inplace computation.
        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        check_is_fitted(self, 'K_fit_all_')

        K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)

        # Row means of the new kernel against the training samples.
        K_pred_cols = (np.sum(K, axis=1) /
                       self.K_fit_rows_.shape[0])[:, np.newaxis]

        K -= self.K_fit_rows_
        K -= K_pred_cols
        K += self.K_fit_all_

        return K


def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data.

    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {array, sparse matrix}, shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    shape = (n_samples, n_features + 1)
    if sparse.issparse(X):
        if sparse.isspmatrix_coo(X):
            # Shift columns to the right.
            col = X.col + 1
            # Column indices of dummy feature are 0 everywhere.
            col = np.concatenate((np.zeros(n_samples), col))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            row = np.concatenate((np.arange(n_samples), X.row))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.coo_matrix((data, (row, col)), shape)
        elif sparse.isspmatrix_csc(X):
            # Shift index pointers since we need to add n_samples elements.
            indptr = X.indptr + n_samples
            # indptr[0] must be 0.
            indptr = np.concatenate((np.array([0]), indptr))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            indices = np.concatenate((np.arange(n_samples), X.indices))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.csc_matrix((data, indices, indptr), shape)
        else:
            # Other sparse formats: convert to COO, recurse, convert back.
            klass = X.__class__
            return klass(add_dummy_feature(X.tocoo(), value))
    else:
        return np.hstack((np.ones((n_samples, 1)) * value, X))


def _transform_selected(X, transform, selected="all", copy=True):
    """Apply a transform function to portion of selected features

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Dense array or sparse matrix.

    transform : callable
        A callable transform(X) -> X_transformed

    copy : boolean, optional
        Copy X even if it could be avoided.

    selected: "all" or array of indices or mask
        Specify which features to apply the transform to.

    Returns
    -------
    X : array or sparse matrix, shape=(n_samples, n_features_new)
    """
    if isinstance(selected, six.string_types) and selected == "all":
        return transform(X)

    X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)

    if len(selected) == 0:
        return X

    n_features = X.shape[1]
    ind = np.arange(n_features)
    # Build a boolean mask from indices or an existing mask alike.
    sel = np.zeros(n_features, dtype=bool)
    sel[np.asarray(selected)] = True
    not_sel = np.logical_not(sel)
    n_selected = np.sum(sel)

    if n_selected == 0:
        # No features selected.
        return X
    elif n_selected == n_features:
        # All features selected.
        return transform(X)
    else:
        X_sel = transform(X[:, ind[sel]])
        X_not_sel = X[:, ind[not_sel]]

        # Transformed features first, untouched features stacked to the right.
        if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
            return sparse.hstack((X_sel, X_not_sel))
        else:
            return np.hstack((X_sel, X_not_sel))


class OneHotEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical integer features using a one-hot aka one-of-K scheme.

    The input to this transformer should be a matrix of integers, denoting
    the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
    feature. It is assumed that input features take on values in the range
    [0, n_values).
    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.

    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.

    Parameters
    ----------
    n_values : 'auto', int or array of ints
        Number of values per feature.

        - 'auto' : determine value range from training data.
        - int : number of categorical values per feature.
                Each feature value should be in ``range(n_values)``
        - array : ``n_values[i]`` is the number of categorical values in
                  ``X[:, i]``. Each feature value should be
                  in ``range(n_values[i])``

    categorical_features: "all" or array of indices or mask
        Specify what features are treated as categorical.

        - 'all' (default): All features are treated as categorical.
        - array of indices: Array of categorical feature indices.
        - mask: Array of length n_features and with dtype=bool.

        Non-categorical features are always stacked to the right of
        the matrix.

    dtype : number type, default=np.float
        Desired dtype of output.

    sparse : boolean, default=True
        Will return sparse matrix if set True else will return an array.

    handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform.

    Attributes
    ----------
    active_features_ : array
        Indices for active features, meaning values that actually occur
        in the training set. Only available when n_values is ``'auto'``.

    feature_indices_ : array of shape (n_features,)
        Indices to feature ranges.
        Feature ``i`` in the original data is mapped to features
        from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
        (and then potentially masked by `active_features_` afterwards)

    n_values_ : array of shape (n_features,)
        Maximum number of values per feature.

    Examples
    --------
    Given a dataset with three features and two samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float64, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape [n_samples, n_feature] Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if self.n_values == 'auto': n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. 
Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. """ return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those categorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. 
mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either error or " "unknown got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X.ravel()[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape [n_samples, n_features] Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
{ "content_hash": "fb51d23045a6121becafcbd8a7f641b6", "timestamp": "", "source": "github", "line_count": 1855, "max_line_length": 88, "avg_line_length": 35.984366576819404, "alnum_prop": 0.5946277958382646, "repo_name": "jpautom/scikit-learn", "id": "56d4b4e54ae6b20848c0272ddd37085324c605d2", "size": "67091", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "sklearn/preprocessing/data.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "C", "bytes": "394788" }, { "name": "C++", "bytes": "140225" }, { "name": "Makefile", "bytes": "1588" }, { "name": "PowerShell", "bytes": "17312" }, { "name": "Python", "bytes": "6245918" }, { "name": "Shell", "bytes": "5112" } ], "symlink_target": "" }
import errno
import os
import shutil
import tempfile

from oslo_config import cfg

from gnocchi import storage
from gnocchi import utils

OPTS = [
    cfg.StrOpt('file_basepath',
               default='/var/lib/gnocchi',
               help='Path used to store gnocchi data files.'),
]


class FileStorage(storage.StorageDriver):
    """Filesystem-backed storage driver.

    Layout (as built by the ``_build_*`` helpers below): one directory per
    metric (named after ``metric.id``) under ``basepath``; inside it, one
    ``agg_<aggregation>`` directory per aggregation method, plus a
    ``none_v<version>`` file holding the raw (unaggregated) timeserie.
    """

    # This driver always rewrites whole split files; see
    # _store_metric_measures, which ignores `offset`.
    WRITE_FULL = True

    def __init__(self, conf, coord=None):
        super(FileStorage, self).__init__(conf, coord)
        self.basepath = conf.file_basepath
        # Scratch directory kept under basepath so that it lives on the same
        # filesystem and os.rename() in _atomic_file_store stays atomic.
        self.basepath_tmp = os.path.join(self.basepath, 'tmp')
        utils.ensure_paths([self.basepath_tmp])

    def __str__(self):
        return "%s: %s" % (self.__class__.__name__, str(self.basepath))

    def _atomic_file_store(self, dest, data):
        # Write to a temporary file, then rename over the destination:
        # readers never observe a partially written file.
        tmpfile = tempfile.NamedTemporaryFile(
            prefix='gnocchi', dir=self.basepath_tmp,
            delete=False)
        tmpfile.write(data)
        tmpfile.close()
        os.rename(tmpfile.name, dest)

    def _build_metric_dir(self, metric):
        # <basepath>/<metric-id>
        return os.path.join(self.basepath, str(metric.id))

    def _build_unaggregated_timeserie_path(self, metric, version=3):
        # <metric-dir>/none[_v<version>]
        return os.path.join(
            self._build_metric_dir(metric),
            'none' + ("_v%s" % version if version else ""))

    def _build_metric_path(self, metric, aggregation):
        # <metric-dir>/agg_<aggregation>
        return os.path.join(self._build_metric_dir(metric),
                            "agg_" + aggregation)

    def _build_metric_path_for_split(self, metric, aggregation,
                                     key, version=3):
        # <agg-dir>/<key>_<granularity-in-seconds>[_v<version>]
        path = os.path.join(
            self._build_metric_path(metric, aggregation),
            str(key) + "_" + str(utils.timespan_total_seconds(key.sampling)))
        return path + '_v%s' % version if version else path

    def _create_metric(self, metric):
        path = self._build_metric_dir(metric)
        try:
            os.mkdir(path, 0o750)
        except OSError as e:
            # EEXIST means the metric directory was already created,
            # presumably by a concurrent worker.
            if e.errno == errno.EEXIST:
                raise storage.MetricAlreadyExists(metric)
            raise
        for agg in metric.archive_policy.aggregation_methods:
            try:
                os.mkdir(self._build_metric_path(metric, agg), 0o750)
            except OSError as e:
                # Pre-existing aggregation directories are fine here.
                if e.errno != errno.EEXIST:
                    raise

    def _store_unaggregated_timeserie(self, metric, data, version=3):
        self._atomic_file_store(
            self._build_unaggregated_timeserie_path(metric, version),
            data)

    def _get_unaggregated_timeserie(self, metric, version=3):
        path = self._build_unaggregated_timeserie_path(metric, version)
        try:
            with open(path, 'rb') as f:
                return f.read()
        except IOError as e:
            # A missing file means the metric was never created.
            if e.errno == errno.ENOENT:
                raise storage.MetricDoesNotExist(metric)
            raise

    def _list_split_keys(self, metric, aggregation, granularity, version=3):
        try:
            files = os.listdir(self._build_metric_path(metric, aggregation))
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise storage.MetricDoesNotExist(metric)
            raise
        keys = set()
        granularity = str(utils.timespan_total_seconds(granularity))
        for f in files:
            # Filenames are "<key>_<granularity>[_v<version>]" (see
            # _build_metric_path_for_split); keep keys matching the
            # requested granularity and version.
            meta = f.split("_")
            if meta[1] == granularity and self._version_check(f, version):
                keys.add(meta[0])
        return keys

    def _delete_metric_measures(self, metric, key, aggregation,
                                version=3):
        os.unlink(self._build_metric_path_for_split(
            metric, aggregation, key, version))

    def _store_metric_measures(self, metric, key, aggregation,
                               data, offset=None, version=3):
        # `offset` is accepted for interface compatibility but unused:
        # this driver always rewrites the whole split (WRITE_FULL).
        self._atomic_file_store(
            self._build_metric_path_for_split(metric, aggregation,
                                              key, version),
            data)

    def _delete_metric(self, metric):
        path = self._build_metric_dir(metric)
        try:
            shutil.rmtree(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                # NOTE(jd) Maybe the metric has never been created (no
                # measures)
                raise

    def _get_measures(self, metric, key, aggregation, version=3):
        path = self._build_metric_path_for_split(
            metric, aggregation, key, version)
        try:
            with open(path, 'rb') as aggregation_file:
                return aggregation_file.read()
        except IOError as e:
            if e.errno == errno.ENOENT:
                # Distinguish "metric missing" from "aggregation missing":
                # if the metric dir exists, only this aggregation is absent.
                if os.path.exists(self._build_metric_dir(metric)):
                    raise storage.AggregationDoesNotExist(metric, aggregation)
                raise storage.MetricDoesNotExist(metric)
            raise
{ "content_hash": "e90389ce999a496d214b02cd612750ee", "timestamp": "", "source": "github", "line_count": 137, "max_line_length": 78, "avg_line_length": 35.167883211678834, "alnum_prop": 0.570568700705687, "repo_name": "leandroreox/gnocchi", "id": "697fffffa4c5c492af95d318b4f7fcf60da9f044", "size": "5456", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gnocchi/storage/file.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "1064" }, { "name": "Python", "bytes": "807668" }, { "name": "Shell", "bytes": "24197" } ], "symlink_target": "" }
""" Utilities for :mod:`empymod.model` such as checking input parameters. This module consists of four groups of functions: 0. General settings 1. Class EMArray 2. Input parameter checks for modelling 3. Internal utilities """ # Copyright 2016-2022 The emsig community. # # This file is part of empymod. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # Mandatory imports import copy import numpy as np from scipy import special from timeit import default_timer from datetime import timedelta, datetime # Relative imports from empymod import filters, transform # scooby is a soft dependency for empymod try: from scooby import Report as ScoobyReport except ImportError: class ScoobyReport: def __init__(self, additional, core, optional, ncol, text_width, sort): print("\n* WARNING :: `empymod.Report` requires `scooby`." "\n Install it via `pip install scooby`.\n") # Version: We take care of it here instead of in __init__, so we can use it # within the package itself (logs). try: # - Released versions just tags: 1.10.0 # - GitHub commits add .dev#+hash: 1.10.1.dev3+g973038c # - Uncommitted changes add timestamp: 1.10.1.dev3+g973038c.d20191022 from empymod.version import version as __version__ except ImportError: # If it was not installed, then we don't know the version. We could throw a # warning here, but this case *should* be rare. empymod should be installed # properly! 
    __version__ = 'unknown-'+datetime.today().strftime('%Y%m%d')


__all__ = ['EMArray', 'check_time_only', 'check_time', 'check_model',
           'check_frequency', 'check_hankel', 'check_loop', 'check_dipole',
           'check_bipole', 'check_ab', 'check_solution', 'get_abs',
           'get_geo_fact', 'get_azm_dip', 'get_off_ang', 'get_layer_nr',
           'printstartfinish', 'conv_warning', 'set_minimum', 'get_minimum',
           'Report']


# 0. General settings

# Module-wide clipping minima; applied by the check_* functions below
# (adjustable via set_minimum / get_minimum).
_min_freq = 1e-20   # Minimum frequency [Hz]
_min_time = 1e-20   # Minimum time [s]
_min_off = 1e-3     # Minimum offset [m]
#                   # > Also used to round src- & rec-coordinates (1e-3 => mm)
_min_res = 1e-20    # Minimum value for horizontal/vertical resistivity
_min_angle = 1e-10  # Angle factors smaller than that are set to 0


# 1. Class EMArray

class EMArray(np.ndarray):
    r"""Create an EM-ndarray: add *amplitude* <amp> and *phase* <pha> methods.

    Parameters
    ----------
    data : array
        Data to which to add `.amp` and `.pha` attributes.

    Examples
    --------
    >>> import numpy as np
    >>> from empymod.utils import EMArray
    >>> emvalues = EMArray(np.array([1+1j, 1-4j, -1+2j]))
    >>> print(f"Amplitude : {emvalues.amp()}")
    Amplitude : [1.41421356 4.12310563 2.23606798]
    >>> print(f"Phase (rad) : {emvalues.pha()}")
    Phase (rad) : [ 0.78539816 -1.32581766 -4.24874137]
    >>> print(f"Phase (deg) : {emvalues.pha(deg=True)}")
    Phase (deg) : [ 45. -75.96375653 -243.43494882]
    >>> print(f"Phase (deg; lead) : {emvalues.pha(deg=True, lag=False)}")
    Phase (deg; lead) : [-45. 75.96375653 243.43494882]

    """

    def __new__(cls, data):
        r"""Create a new EMArray."""
        return np.asarray(data).view(cls)

    def amp(self):
        """Amplitude of the electromagnetic field."""
        return np.abs(self.view())

    def pha(self, deg=False, unwrap=True, lag=True):
        """Phase of the electromagnetic field.

        Parameters
        ----------
        deg : bool
            If True the returned phase is in degrees, else in radians.
            Default is False (radians).

        unwrap : bool
            If True the returned phase is unwrapped.
            Default is True (unwrapped).

        lag : bool
            If True the returned phase is lag, else lead defined.
            Default is True (lag defined).

        """
        # Get phase, lead or lag defined.
        if lag:
            pha = np.angle(self.view())
        else:
            # Lead-defined phase: angle of the complex conjugate.
            pha = np.angle(np.conj(self.view()))

        # Unwrap if `unwrap`.
        # np.unwrap removes the EMArray class;
        # for consistency, we wrap it in EMArray again.
        if unwrap and self.size > 1:
            pha = EMArray(np.unwrap(pha))

        # Convert to degrees if `deg`.
        if deg:
            pha *= 180/np.pi

        return pha


# 2. Input parameter checks for modelling

# 2.a <Check>s (alphabetically)

def check_ab(ab, verb):
    r"""Check source-receiver configuration.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    ab : int
        Source-receiver configuration.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    ab_calc : int
        Adjusted source-receiver configuration using reciprocity.

    msrc, mrec : bool
        If True, src/rec is magnetic; if False, src/rec is electric.

    """
    # Try to cast ab into an integer
    try:
        ab = int(ab)
    except TypeError as e:
        # Chain the original error for debugging context.
        raise TypeError("<ab> must be an integer.") from e

    # Check src and rec orientation (<ab> for alpha-beta)
    # pab: all possible values that <ab> can take
    pab = [11, 12, 13, 14, 15, 16, 21, 22, 23, 24, 25, 26,
           31, 32, 33, 34, 35, 36, 41, 42, 43, 44, 45, 46,
           51, 52, 53, 54, 55, 56, 61, 62, 63, 64, 65, 66]
    if ab not in pab:
        raise ValueError(f"<ab> must be one of: {pab}; <ab> provided: {ab}.")

    # Print input <ab>
    if verb > 2:
        print(f" Input ab : {ab}")

    # Check if src and rec are magnetic or electric
    # (digits 1-3 are electric x/y/z, 4-6 magnetic x/y/z).
    msrc = ab % 10 > 3   # If True: magnetic src
    mrec = ab // 10 > 3  # If True: magnetic rec

    # If rec is magnetic, switch <ab> using reciprocity.
    if mrec:
        if msrc:
            # G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)
            ab_calc = ab - 33  # -30 : mrec->erec; -3: msrc->esrc
        else:
            # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)
            ab_calc = ab % 10*10 + ab // 10  # Swap alpha/beta
    else:
        ab_calc = ab

    # Print actual calculated <ab>
    if verb > 2:
        if ab in [36, 63]:
            # These configurations are identically zero by symmetry.
            print(f"\n> <ab> IS {ab} WHICH IS ZERO; returning")
        else:
            print(f" Calculated ab : {ab_calc}")

    return ab_calc, msrc, mrec


def check_bipole(inp, name):
    r"""Check di-/bipole parameters.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    inp : list of floats or arrays
        Coordinates of inp (m):
        [dipole-x, dipole-y, dipole-z, azimuth, dip] or.
        [bipole-x0, bipole-x1, bipole-y0, bipole-y1, bipole-z0, bipole-z1].

    name : str, {'src', 'rec'}
        Pole-type.

    Returns
    -------
    inp : list
        As input, checked for type and length.

    ninp : int
        Number of inp.

    ninpz : int
        Number of inp depths (ninpz is either 1 or ninp).

    isdipole : bool
        True if inp is a dipole.

    """

    def chck_dipole(inp, name):
        r"""Check inp for shape and type."""
        # Check x
        inp_x = _check_var(inp[0], float, 1, name+'-x')

        # Check y and ensure it has same dimension as x
        inp_y = _check_var(inp[1], float, 1, name+'-y', inp_x.shape)

        # Check z
        inp_z = _check_var(inp[2], float, 1, name+'-z', (1,), inp_x.shape)

        # Check if all depths are the same, if so replace by one value
        if np.all(np.isclose(inp_z-inp_z[0], 0)):
            inp_z = np.array([inp_z[0]])

        return [inp_x, inp_y, inp_z]

    # Check length of inp.
    narr = len(inp)
    if narr not in [5, 6]:
        raise ValueError(f"Parameter {name} has wrong length! : "
                         f"{narr} instead of 5 (dipole) or 6 (bipole).")

    # Flag if it is a dipole or not
    isdipole = narr == 5

    if isdipole:  # dipole checks
        # Check x, y, and z
        out = chck_dipole(inp, name)

        # Check azimuth and dip
        inp_a = _check_var(inp[3], float, 1, 'azimuth', (1,), out[0].shape)
        inp_d = _check_var(inp[4], float, 1, 'dip', (1,), out[0].shape)

        # How many different depths
        nz = out[2].size

        # Expand azimuth and dip to match number of depths
        if nz > 1:
            if inp_a.size == 1:
                inp_a = np.ones(nz)*inp_a
            if inp_d.size == 1:
                inp_d = np.ones(nz)*inp_d

        out = [*out, inp_a, inp_d]

    else:  # bipole checks
        # Check each pole for x, y, and z
        # (inp is interleaved: [x0, x1, y0, y1, z0, z1]).
        out0 = chck_dipole(inp[::2], name+'-1')   # [x0, y0, z0]
        out1 = chck_dipole(inp[1::2], name+'-2')  # [x1, y1, z1]

        # If one pole has a single depth, but the other has various
        # depths, we have to repeat the single depth, as we will have
        # to loop over them.
        if out0[2].size != out1[2].size:
            if out0[2].size == 1:
                out0[2] = np.repeat(out0[2], out1[2].size)
            else:
                out1[2] = np.repeat(out1[2], out0[2].size)

        # Check if inp is a dipole instead of a bipole
        # (This is a problem, as we would could not define the angles then.)
        if not np.all((out0[0] != out1[0]) + (out0[1] != out1[1]) +
                      (out0[2] != out1[2])):
            raise ValueError(f"At least one of <{name}> is a point dipole, "
                             "use the format\n[x, y, z, azimuth, dip] "
                             "instead of [x0, x1, y0, y1, z0, z1].")

        # Collect elements
        out = [out0[0], out1[0], out0[1], out1[1], out0[2], out1[2]]

        # How many different depths
        nz = out[4].size

    return out, out[0].size, nz, isdipole


def check_dipole(inp, name, verb):
    r"""Check dipole parameters.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    inp : list of floats or arrays
        Pole coordinates (m): [pole-x, pole-y, pole-z].

    name : str, {'src', 'rec'}
        Pole-type.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    inp : list
        List of pole coordinates [x, y, z].

    ninp : int
        Number of inp-elements

    """
    # Check inp for x, y, and z; x & y must have same length, z is a float
    _check_shape(np.squeeze(np.asarray(inp, dtype=object)), name, (3,))
    inp_x = _check_var(inp[0], float, 1, name+'-x')
    inp_y = _check_var(inp[1], float, 1, name+'-y', inp_x.shape)
    inp_z = _check_var(inp[2], float, 1, name+'-z', (1,))
    out = [inp_x, inp_y, inp_z]

    # Print spatial parameters
    if verb > 2:
        # Pole-type: src or rec
        if name == 'src':
            longname = ' Source(s) : '
        else:
            longname = ' Receiver(s) : '

        print(f"{longname} {out[0].size} dipole(s)")
        tname = ['x ', 'y ', 'z ']
        for i in range(3):
            text = " > " + tname[i] + " [m] : "
            _prnt_min_max_val(out[i], text, verb)

    return out, out[0].size


def check_frequency(freq, res, aniso, epermH, epermV, mpermH, mpermV, verb):
    r"""Calculate frequency-dependent parameters.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    freq : array_like
        Frequencies f (Hz).

    res : array_like
        Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.

    aniso : array_like
        Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.

    epermH, epermV : array_like
        Relative horizontal/vertical electric permittivities
        epsilon_h/epsilon_v (-);
        #epermH = #epermV = #res.

    mpermH, mpermV : array_like
        Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
        #mpermH = #mpermV = #res.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    freq : float
        Frequency, checked for size and assured min_freq.

    etaH, etaV : array
        Parameters etaH/etaV, same size as provided resistivity.

    zetaH, zetaV : array
        Parameters zetaH/zetaV, same size as provided resistivity.

    """
    global _min_freq

    # Check if the user provided a model for etaH/etaV/zetaH/zetaV
    if isinstance(res, dict):
        res = res['res']

    # Check frequency
    freq = _check_var(freq, float, 1, 'freq')

    # As soon as at least one freq >0, we assume frequencies. Only if ALL are
    # below 0 we assume Laplace and take the negative of it.
    if np.any(freq > 0):
        laplace = False
        text_min = "Frequencies"
        text_verb = " frequency"
    else:
        laplace = True
        freq = -freq
        text_min = "Laplace val"
        text_verb = " s-value "

    # Minimum frequency to avoid division by zero at freq = 0 Hz.
    # => min_freq can be set with utils.set_min
    freq = _check_min(freq, _min_freq, text_min, "Hz", verb)
    if verb > 2:
        _prnt_min_max_val(freq, text_verb+" [Hz] : ", verb)

    # Define Laplace parameter sval.
    if laplace:
        sval = freq
    else:
        sval = 2j*np.pi*freq

    # Calculate eta and zeta (horizontal and vertical)
    c = 299792458              # Speed of light m/s
    mu_0 = 4e-7*np.pi          # Magn. permeability of free space [H/m]
    epsilon_0 = 1./(mu_0*c*c)  # Elec. permittivity of free space [F/m]

    etaH = 1/res + np.outer(sval, epermH*epsilon_0)
    etaV = 1/(res*aniso*aniso) + np.outer(sval, epermV*epsilon_0)
    zetaH = np.outer(sval, mpermH*mu_0)
    zetaV = np.outer(sval, mpermV*mu_0)

    return freq, etaH, etaV, zetaH, zetaV


def check_hankel(ht, htarg, verb):
    r"""Check Hankel transform parameters.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    ht : {'dlf', 'qwe', 'quad'}
        Flag to choose the Hankel transform.

    htarg : dict
        Arguments of Hankel transform; depends on the value for `ht`.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    ht, htarg
        Checked if valid and set to defaults if not provided.

    """
    # Ensure ht is all lowercase
    ht = ht.lower()

    # Initiate output dict
    # (args is consumed via pop(); leftovers trigger a warning at the end).
    targ = {}
    args = copy.deepcopy(htarg)

    if ht == 'dlf':  # DLF

        # If filter is a name (str), get it
        targ['dlf'] = args.pop('dlf', filters.key_201_2009())
        if isinstance(targ['dlf'], str):
            targ['dlf'] = getattr(filters, targ['dlf'])()

        # Ensure the provided filter has the necessary attributes.
        base = hasattr(targ['dlf'], 'base')
        j0 = hasattr(targ['dlf'], 'j0')
        j1 = hasattr(targ['dlf'], 'j1')
        factor = hasattr(targ['dlf'], 'factor')
        if not base or not j0 or not j1 or not factor:
            raise AttributeError(
                "DLF-filter is missing some attributes; "
                f"base: {base}; j0: {j0}; j1: {j1}; factor: {factor}.")

        # Check dimension and type of pts_per_dec
        targ['pts_per_dec'] = _check_var(
            args.pop('pts_per_dec', 0.0), float, 0, 'dlf: pts_per_dec', ())

        # If verbose, print Hankel transform information
        if verb > 2:
            print(" Hankel : DLF (Fast Hankel Transform)")
            print(f" > Filter : {targ['dlf'].name}")
            pstr = " > DLF type : "
            if targ['pts_per_dec'] < 0:
                print(f"{pstr}Lagged Convolution")
            elif targ['pts_per_dec'] > 0:
                print(f"{pstr}Splined, {targ['pts_per_dec']} pts/dec")
            else:
                print(f"{pstr}Standard")

    elif ht == 'qwe':  # QWE
        # rtol : 1e-12
        targ['rtol'] = _check_var(
            args.pop('rtol', 1e-12), float, 0, 'qwe: rtol', ())
        # atol : 1e-30
        targ['atol'] = _check_var(
            args.pop('atol', 1e-30), float, 0, 'qwe: atol', ())
        # nquad : 51
        targ['nquad'] = _check_var(
            args.pop('nquad', 51), int, 0, 'qwe: nquad', ())
        # maxint : 100
        targ['maxint'] = _check_var(
            args.pop('maxint', 100), int, 0, 'qwe: maxint', ())
        # pts_per_dec : 0  # No spline
        pts_per_dec = _check_var(
            args.pop('pts_per_dec', 0), int, 0, 'qwe: pts_per_dec', ())
        targ['pts_per_dec'] = _check_min(
            pts_per_dec, 0, 'pts_per_dec', '', verb)
        # diff_quad : 100
        targ['diff_quad'] = _check_var(
            args.pop('diff_quad', 100), float, 0, 'qwe: diff_quad', ())
        # a : None
        targ['a'] = args.pop('a', None)
        if targ['a'] is not None:
            targ['a'] = _check_var(targ['a'], float, 0, 'qwe: a (quad)', ())
        # b : None
        targ['b'] = args.pop('b', None)
        if targ['b'] is not None:
            targ['b'] = _check_var(targ['b'], float, 0, 'qwe: b (quad)', ())
        # limit : None
        targ['limit'] = args.pop('limit', None)
        if targ['limit'] is not None:
            targ['limit'] = _check_var(
                targ['limit'], int, 0, 'qwe: limit (quad)', ())

        # If verbose, print Hankel transform information
        if verb > 2:
            print(" Hankel : Quadrature-with-Extrapolation")
            print(f" > rtol : {targ['rtol']}")
            print(f" > atol : {targ['atol']}")
            print(f" > nquad : {targ['nquad']}")
            print(f" > maxint : {targ['maxint']}")
            print(f" > pts_per_dec : {targ['pts_per_dec']}")
            print(f" > diff_quad : {targ['diff_quad']}")
            if targ['a']:
                print(f" > a (quad): {targ['a']}")
            if targ['b']:
                print(f" > b (quad): {targ['b']}")
            if targ['limit']:
                print(f" > limit (quad): {targ['limit']}")

    # NOTE(review): `ht in 'quad'` is a substring test, so e.g. ht='ua'
    # would also enter this branch — presumably `ht == 'quad'` was meant;
    # confirm before changing.
    elif ht in 'quad':  # QUAD
        # rtol : 1e-12
        targ['rtol'] = _check_var(
            args.pop('rtol', 1e-12), float, 0, 'quad: rtol', ())
        # atol : 1e-20
        targ['atol'] = _check_var(
            args.pop('atol', 1e-20), float, 0, 'quad: atol', ())
        # limit : 500
        targ['limit'] = _check_var(
            args.pop('limit', 500), int, 0, 'quad: limit', ())
        # a : 1e-6
        targ['a'] = _check_var(args.pop('a', 1e-6), float, 0, 'quad: a', ())
        # b : 0.1
        targ['b'] = _check_var(args.pop('b', 0.1), float, 0, 'quad: b', ())
        # pts_per_dec : 40
        pts_per_dec = _check_var(
            args.pop('pts_per_dec', 40), int, 0, 'quad: pts_per_dec', ())
        targ['pts_per_dec'] = _check_min(
            pts_per_dec, 1, 'pts_per_dec', '', verb)

        # If verbose, print Hankel transform information
        if verb > 2:
            print(" Hankel : Quadrature")
            print(f" > rtol : {targ['rtol']}")
            print(f" > atol : {targ['atol']}")
            print(f" > limit : {targ['limit']}")
            print(f" > a : {targ['a']}")
            print(f" > b : {targ['b']}")
            print(f" > pts_per_dec : {targ['pts_per_dec']}")

    else:
        raise ValueError("<ht> must be one of: ['dlf', 'qwe', 'quad'];"
                         f" <ht> provided: {ht}.")

    # Check remaining arguments.
    # Anything still left in args was not consumed by the chosen method.
    if args and verb > 0:
        print(f"* WARNING :: Unknown htarg {args} for method '{ht}'")

    return ht, targ


def check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV, xdirect,
                verb):
    r"""Check the model: depth and corresponding layer parameters.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    depth : list
        Absolute layer interfaces z (m); #depth = #res - 1 (excluding +/-
        infinity).

    res : array_like
        Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.

    aniso : array_like
        Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.

    epermH, epermV : array_like
        Relative horizontal/vertical electric permittivities
        epsilon_h/epsilon_v (-);
        #epermH = #epermV = #res.

    mpermH, mpermV : array_like
        Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
        #mpermH = #mpermV = #res.

    xdirect : bool, optional
        If True and source and receiver are in the same layer, the direct
        field is calculated analytically in the frequency domain, if False it
        is calculated in the wavenumber domain.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    depth : array
        Depths of layer interfaces, adds -infty at beginning if not present.

    res : array
        As input, checked for size.

    aniso : array
        As input, checked for size. If None, defaults to an array of ones.

    epermH, epermV : array_like
        As input, checked for size. If None, defaults to an array of ones.

    mpermH, mpermV : array_like
        As input, checked for size. If None, defaults to an array of ones.

    isfullspace : bool
        If True, the model is a fullspace (res, aniso, epermH, epermV, mpermM,
        and mpermV are in all layers the same).

    """
    global _min_res

    # Check depth
    if depth is None:
        depth = []
    depth = _check_var(depth, float, 1, 'depth')

    # If all depths are decreasing, swap depth and parameters.
    # All-layers-identical test for each material parameter; the products
    # below act as logical ANDs on the boolean results.
    isores = (res - res[0] == 0).all()*(aniso - aniso[0] == 0).all()
    isoep = (epermH - epermH[0] == 0).all()*(epermV - epermV[0] == 0).all()
    isomp = (mpermH - mpermH[0] == 0).all()*(mpermV - mpermV[0] == 0).all()
    isfullspace = isores*isoep*isomp

    # Check parameters of user-provided parameters
    if res_dict:
        # Switch off fullspace-option
        isfullspace = False

        # Loop over key, value pair and check
        for key, value in res_dict.items():
            if key not in ['res', 'func_eta', 'func_zeta']:
                res_dict[key] = check_inp(value, key, None)

        # Put res back
        res_dict['res'] = res

        # store res_dict back to res
        res = res_dict

    # Print fullspace info
    if verb > 2 and isfullspace:
        if xdirect:
            print("\n> MODEL IS A FULLSPACE; returning analytical "
                  "frequency-domain solution")
        else:
            print("\n> MODEL IS A FULLSPACE")

    # Print xdirect info
    if verb > 2:
        if xdirect is None:
            print(" direct field : Not calculated (secondary field)")
        elif xdirect:
            print(" direct field : Comp. in frequency domain")
        else:
            print(" direct field : Comp. in wavenumber domain")

    return depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace


def check_loop(loop, ht, htarg, verb):
    r"""Check loop parameter.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    loop : {None, 'freq', 'off'}
        Loop flag.

    ht : {'dlf', 'qwe', 'quad'}
        Flag to choose the Hankel transform.

    htarg : dict
        Arguments of Hankel transform; depends on the value for `ht`.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    loop_freq : bool
        Boolean if to loop over frequencies.

    loop_off : bool
        Boolean if to loop over offsets.

    """
    # Define if to loop over frequencies or over offsets
    # DLF with pts_per_dec != 0 means lagged-convolution/splined mode.
    lagged_splined_dlf = False
    if ht == 'dlf':
        if htarg['pts_per_dec'] != 0:
            lagged_splined_dlf = True

    # QWE/QUAD and lagged/splined DLF force a loop over frequencies;
    # otherwise the user-provided `loop` flag decides.
    if ht in ['qwe', 'quad'] or lagged_splined_dlf:
        loop_freq = True
        loop_off = False
    else:
        loop_off = loop == 'off'
        loop_freq = loop == 'freq'

    # If verbose, print loop information
    if verb > 2:
        if loop_off:
            print(" Loop over : Offsets")
        elif loop_freq:
            print(" Loop over : Frequencies")
        else:
            print(" Loop over : None (all vectorized)")

    return loop_freq, loop_off


def check_time(time, signal, ft, ftarg, verb):
    r"""Check time domain specific input parameters.

    This check-function is called from one of the modelling routines in
    :mod:`empymod.model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    time : array_like
        Times t (s).

    signal : {None, 0, 1, -1}
        Source signal:

        - None: Frequency-domain response
        - -1 : Switch-off time-domain response
        - 0 : Impulse time-domain response
        - +1 : Switch-on time-domain response

    ft : {'dlf', 'qwe', 'fftlog', 'fft'}
        Flag for Fourier transform.

    ftarg : dict
        Arguments of Fourier transform; depends on the value for `ft`.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    time : float
        Time, checked for size and assured min_time.

    freq : float
        Frequencies required for given times and ft-settings.

    ft, ftarg
        Checked if valid and set to defaults if not provided, checked with
        signal.
""" # Check time and input signal time = check_time_only(time, signal, verb) # Ensure ft is all lowercase ft = ft.lower() # Initiate output dict targ = {} args = copy.deepcopy(ftarg) if ft == 'dlf': # Fourier-DLF (sin/cos-filters) # Check dimension and type of pts_per_dec targ['pts_per_dec'] = _check_var( args.pop('pts_per_dec', -1.0), float, 0, 'dlf: pts_per_dec', ()) # Check kind; if switch-off/on is required, ensure kind is cosine/sine targ['kind'] = args.pop('kind', 'sin') if signal > 0: targ['kind'] = 'sin' elif signal < 0: targ['kind'] = 'cos' if targ['kind'] not in ['sin', 'cos']: raise ValueError("'kind' must be either 'sin' or 'cos'; " f"provided: {targ['kind']}.") # If filter is a name (str), get it targ['dlf'] = args.pop('dlf', filters.key_201_CosSin_2012()) if isinstance(targ['dlf'], str): targ['dlf'] = getattr(filters, targ['dlf'])() # Ensure the provided filter has the necessary attributes. base = hasattr(targ['dlf'], 'base') if targ['kind'] == 'sin': sincos = hasattr(targ['dlf'], 'sin') else: sincos = hasattr(targ['dlf'], 'cos') factor = hasattr(targ['dlf'], 'factor') if not base or not sincos or not factor: raise AttributeError( "DLF-filter is missing some attributes; base: " f"{base}; {targ['kind']}: {sincos}; factor: {factor}.") # If verbose, print Fourier transform information if verb > 2: if targ['kind'] == 'sin': print(" Fourier : DLF (Sine-Filter)") else: print(" Fourier : DLF (Cosine-Filter)") print(f" > Filter : {targ['dlf'].name}") pstr = " > DLF type : " if targ['pts_per_dec'] < 0: print(f"{pstr}Lagged Convolution") elif targ['pts_per_dec'] > 0: print(f"{pstr}Splined, {targ['pts_per_dec']} pts/dec") else: print(f"{pstr}Standard") # Get required frequencies omega, _ = transform.get_dlf_points( targ['dlf'], time, targ['pts_per_dec']) freq = np.squeeze(omega/2/np.pi) elif ft == 'qwe': # QWE (using sine and imag-part) # If switch-off is required, use cosine, else sine args.pop('sincos', None) if signal >= 0: targ['sincos'] = np.sin else: 
targ['sincos'] = np.cos # rtol : 1e-8 targ['rtol'] = _check_var( args.pop('rtol', 1e-8), float, 0, 'qwe: rtol', ()) # atol : 1e-20 targ['atol'] = _check_var( args.pop('atol', 1e-20), float, 0, 'qwe: atol', ()) # nquad : 21 targ['nquad'] = _check_var( args.pop('nquad', 21), int, 0, 'qwe: nquad', ()) # maxint : 200 targ['maxint'] = _check_var( args.pop('maxint', 200), int, 0, 'qwe: maxint', ()) # pts_per_dec : 20 pts_per_dec = _check_var( args.pop('pts_per_dec', 20), int, 0, 'qwe: pts_per_dec', ()) targ['pts_per_dec'] = _check_min( pts_per_dec, 1, 'pts_per_dec', '', verb) # diff_quad : 100 targ['diff_quad'] = _check_var( args.pop('diff_quad', 100), int, 0, 'qwe: diff_quad', ()) # a : None targ['a'] = args.pop('a', None) if targ['a'] is not None: targ['a'] = _check_var(targ['a'], float, 0, 'qwe: a (quad)', ()) # b : None targ['b'] = args.pop('b', None) if targ['b'] is not None: targ['b'] = _check_var(targ['b'], float, 0, 'qwe: b (quad)', ()) # limit : None targ['limit'] = args.pop('limit', None) if targ['limit'] is not None: targ['limit'] = _check_var( targ['limit'], int, 0, 'qwe: limit (quad)', ()) # If verbose, print Fourier transform information if verb > 2: print(" Fourier : Quadrature-with-Extrapolation") print(f" > rtol : {targ['rtol']}") print(f" > atol : {targ['atol']}") print(f" > nquad : {targ['nquad']}") print(f" > maxint : {targ['maxint']}") print(f" > pts_per_dec : {targ['pts_per_dec']}") print(f" > diff_quad : {targ['diff_quad']}") if targ['a']: print(f" > a (quad): {targ['a']}") if targ['b']: print(f" > b (quad): {targ['b']}") if targ['limit']: print(f" > limit (quad): {targ['limit']}") # Get required frequencies g_x, _ = special.roots_legendre(targ['nquad']) minf = np.floor(10*np.log10((g_x.min() + 1)*np.pi/2/time.max()))/10 maxf = np.ceil(10*np.log10(targ['maxint']*np.pi/time.min()))/10 freq = np.logspace(minf, maxf, int((maxf-minf)*targ['pts_per_dec']+1)) elif ft == 'fftlog': # FFTLog (using sine and imag-part) # pts_per_dec : 10 pts_per_dec = 
_check_var( args.pop('pts_per_dec', 10), int, 0, 'fftlog: pts_per_dec', ()) targ['pts_per_dec'] = _check_min( pts_per_dec, 1, 'pts_per_dec', '', verb) # add_dec : [-2, 1] targ['add_dec'] = _check_var( args.pop('add_dec', np.array([-2, 1])), float, 1, 'fftlog: add_dec', (2,)) # q : 0 targ['q'] = _check_var(args.pop('q', 0), float, 0, 'fftlog: q', ()) # Restrict q to +/- 1 if np.abs(targ['q']) > 1: targ['q'] = np.sign(targ['q']) # If switch-off is required, use cosine, else sine args.pop('mu', None) if signal >= 0: targ['mu'] = 0.5 else: targ['mu'] = -0.5 # If verbose, print Fourier transform information if verb > 2: print(" Fourier : FFTLog") print(f" > pts_per_dec : {targ['pts_per_dec']}") print(f" > add_dec : {targ['add_dec']}") print(f" > q : {targ['q']}") # Calculate minimum and maximum required frequency minf = np.log10(1/time.max()) + targ['add_dec'][0] maxf = np.log10(1/time.min()) + targ['add_dec'][1] n = np.int64(maxf - minf)*targ['pts_per_dec'] # Initialize FFTLog, get required parameters freq, tcalc, dlnr, kr, rk = transform.get_fftlog_input( minf, maxf, n, targ['q'], targ['mu']) targ['tcalc'] = tcalc targ['dlnr'] = dlnr targ['kr'] = kr targ['rk'] = rk for name in ['tcalc', 'dlnr', 'kr', 'rk']: # So they don't get caught in the args-check. args.pop(name, None) elif ft == 'fft': # FFT # Keys: dfreq, nfreq, ntot, pts_per_dec, fftfreq # dfreq : 0.002 targ['dfreq'] = _check_var( args.pop('dfreq', 0.002), float, 0, 'fft: dfreq', ()) # nfreq : 2048 targ['nfreq'] = _check_var( args.pop('nfreq', 2048), int, 0, 'fft: nfreq', ()) # ntot nall = 2**np.arange(30) targ['ntot'] = _check_var( args.pop('ntot', nall[np.argmax(nall >= targ['nfreq'])]), # (*) int, 0, 'fft: ntot', ()) # Assure that input ntot is not bigger than nfreq if targ['nfreq'] > targ['ntot']: targ['ntot'] = nall[np.argmax(nall >= targ['nfreq'])] # (*) We could use here fftpack.next_fast_len, but tests have shown # that powers of two yield better results in this case. 
# pts_per_dec : None targ['pts_per_dec'] = args.pop('pts_per_dec', None) if targ['pts_per_dec'] is not None: pts_per_dec = _check_var( targ['pts_per_dec'], int, 0, 'fft: pts_per_dec', ()) targ['pts_per_dec'] = _check_min( pts_per_dec, 1, 'pts_per_dec', '', verb) # Get required frequencies if targ['pts_per_dec']: # Space actually calc. freqs logarithmically. start = np.log10(targ['dfreq']) stop = np.log10(targ['nfreq']*targ['dfreq']) freq = np.logspace( start, stop, int((stop-start)*targ['pts_per_dec'] + 1)) else: freq = np.arange(1, targ['nfreq']+1)*targ['dfreq'] # If verbose, print Fourier transform information if verb > 2: print(" Fourier : Fast Fourier Transform FFT") print(f" > dfreq : {targ['dfreq']}") print(f" > nfreq : {targ['nfreq']}") print(f" > ntot : {targ['ntot']}") if targ['pts_per_dec']: print(f" > pts_per_dec : {targ['pts_per_dec']}") else: print(" > pts_per_dec : (linear)") else: raise ValueError("<ft> must be one of: ['dlf', 'qwe', " f"'fftlog', 'fft']; <ft> provided: {ft}") # Check remaining arguments. if args and verb > 0: print(f"* WARNING :: Unknown ftarg {args} for method '{ft}'") return time, freq, ft, targ def check_time_only(time, signal, verb): r"""Check time and signal parameters. This check-function is called from one of the modelling routines in :mod:`empymod.model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- time : array_like Times t (s). signal : {None, 0, 1, -1} Source signal: - None: Frequency-domain response - -1 : Switch-off time-domain response - 0 : Impulse time-domain response - +1 : Switch-on time-domain response verb : {0, 1, 2, 3, 4} Level of verbosity. Returns ------- time : float Time, checked for size and assured min_time. 
""" global _min_time # Check input signal if int(signal) not in [-1, 0, 1]: raise ValueError("<signal> must be one of: [None, -1, 0, 1]; " f"<signal> provided: {signal}") # Check time time = _check_var(time, float, 1, 'time') # Minimum time to avoid division by zero at time = 0 s. # => min_time can be set with utils.set_min time = _check_min(time, _min_time, 'Times', 's', verb) if verb > 2: _prnt_min_max_val(time, " time [s] : ", verb) return time def check_solution(solution, signal, ab, msrc, mrec): r"""Check required solution with parameters. This check-function is called from one of the modelling routines in :mod:`empymod.model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- solution : str String to define analytical solution. signal : {None, 0, 1, -1} Source signal: - None: Frequency-domain response - -1 : Switch-off time-domain response - 0 : Impulse time-domain response - +1 : Switch-on time-domain response msrc, mrec : bool True if src/rec is magnetic, else False. """ # Ensure valid solution. if solution not in ['fs', 'dfs', 'dhs', 'dsplit', 'dtetm']: raise ValueError( "Solution must be one of ['fs', 'dfs', 'dhs', " f"'dsplit', 'dtetm']; <solution> provided: {solution}") # If diffusive solution is required, ensure EE-field. if solution[0] == 'd' and (msrc or mrec): raise ValueError( "Diffusive solution is only implemented for electric " f"sources and electric receivers, <ab> provided: {ab}") # If full solution is required, ensure frequency-domain. if solution == 'fs' and signal is not None: raise ValueError( "Full fullspace solution is only implemented for " f"the frequency domain, <signal> provided: {signal}") # 2.b <Get>s (alphabetically) def get_abs(msrc, mrec, srcazm, srcdip, recazm, recdip, verb): r"""Get required ab's for given angles. This check-function is called from one of the modelling routines in :mod:`empymod.model`. 
Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- msrc, mrec : bool True if src/rec is magnetic, else False. srcazm, recazm : float Horizontal source/receiver angle (azimuth). srcdip, recdip : float Vertical source/receiver angle (dip). verb : {0, 1, 2, 3, 4} Level of verbosity. Returns ------- ab_calc : array of int ab's to calculate for this bipole. """ # Get required ab's (9 at most) ab_calc = np.array([[11, 12, 13], [21, 22, 23], [31, 32, 33]]) if msrc: ab_calc += 3 if mrec: ab_calc += 30 # Switch <ab> using reciprocity. if msrc: # G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e) ab_calc -= 33 # -30 : mrec->erec; -3: msrc->esrc else: # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z) ab_calc = ab_calc % 10*10 + ab_calc // 10 # Swap alpha/beta # Remove unnecessary ab's bab = np.asarray(ab_calc*0+1, dtype=np.bool_) # Remove if source is x- or y-directed check = np.atleast_1d(srcazm) if np.allclose(srcazm % (np.pi/2), 0): # if all angles are multiples of 90 if np.all(np.isclose(check // (np.pi/2) % 2, 0)): # Multiples of pi (180) bab[:, 1] *= False # x-directed source, remove y elif np.all(np.isclose(check // (np.pi/2) % 2, 1)): # Multiples of pi/2 (90) bab[:, 0] *= False # y-directed source, remove x # Remove if source is vertical check = np.atleast_1d(srcdip) if np.allclose(srcdip % (np.pi/2), 0): # if all angles are multiples of 90 if np.all(np.isclose(check // (np.pi/2) % 2, 0)): # Multiples of pi (180) bab[:, 2] *= False # Horizontal, remove z elif np.all(np.isclose(check // (np.pi/2) % 2, 1)): # Multiples of pi/2 (90) bab[:, :2] *= False # Vertical, remove x/y # Remove if receiver is x- or y-directed check = np.atleast_1d(recazm) if np.allclose(recazm % (np.pi/2), 0): # if all angles are multiples of 90 if np.all(np.isclose(check // (np.pi/2) % 2, 0)): # Multiples of pi (180) bab[1, :] *= False # x-directed receiver, remove y elif np.all(np.isclose(check // (np.pi/2) % 2, 1)): # Multiples of pi/2 (90) bab[0, :] 
*= False # y-directed receiver, remove x # Remove if receiver is vertical check = np.atleast_1d(recdip) if np.allclose(recdip % (np.pi/2), 0): # if all angles are multiples of 90 if np.all(np.isclose(check // (np.pi/2) % 2, 0)): # Multiples of pi (180) bab[2, :] *= False # Horizontal, remove z elif np.all(np.isclose(check // (np.pi/2) % 2, 1)): # Multiples of pi/2 (90) bab[:2, :] *= False # Vertical, remove x/y # Reduce ab_calc = ab_calc[bab].ravel() # Print actual calculated <ab> if verb > 2: print(f" Required ab's : {_strvar(ab_calc)}") return ab_calc def get_geo_fact(ab, srcazm, srcdip, recazm, recdip, msrc, mrec): r"""Get required geometrical scaling factor for given angles. This check-function is called from one of the modelling routines in :mod:`empymod.model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- ab : int Source-receiver configuration. srcazm, recazm : float Horizontal source/receiver angle. srcdip, recdip : float Vertical source/receiver angle. Returns ------- fact : float Geometrical scaling factor. """ global _min_angle # Get current direction for source and receiver fis = ab % 10 fir = ab // 10 # If rec is magnetic and src not, swap directions (reciprocity). # (They have been swapped in get_abs, but the original scaling applies.) if mrec and not msrc: fis, fir = fir, fis def gfact(bp, azm, dip): r"""Geometrical factor of source or receiver.""" if bp in [1, 4]: # x-directed return np.cos(azm)*np.cos(dip) elif bp in [2, 5]: # y-directed return np.sin(azm)*np.cos(dip) else: # z-directed return np.sin(dip) # Calculate src-rec-factor fsrc = gfact(fis, srcazm, srcdip) frec = gfact(fir, recazm, recdip) fact = np.outer(frec, fsrc) # Set very small angles to proper zero (because e.g. sin(pi/2) != exact 0) # => min_angle can be set with utils.set_min fact[np.abs(fact) < _min_angle] = 0 return fact def get_layer_nr(inp, depth): r"""Get number of layer in which inp resides. .. 
note:: If zinp is on a layer interface, the layer above the interface is chosen. This check-function is called from one of the modelling routines in :mod:`empymod.model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- inp : list of floats or arrays Dipole coordinates (m) depth : array Depths of layer interfaces. Returns ------- linp : int or array_like of int Layer number(s) in which inp resides (plural only if bipole). zinp : float or array inp[2] (depths). """ zinp = np.array(inp[2], dtype=np.float64) # depth = [-infty : last interface]; create additional depth-array # pdepth = [fist interface : +infty] pdepth = np.concatenate((depth[1:], np.array([np.infty]))) # Broadcast arrays b_zinp = np.atleast_1d(zinp)[:, None] # Get layers linp = np.where((depth[None, :] < b_zinp)*(pdepth[None, :] >= b_zinp))[1] # Return; squeeze in case of only one inp-depth return np.squeeze(linp), np.squeeze(zinp) def get_off_ang(src, rec, nsrc, nrec, verb): r"""Get depths, offsets, angles, hence spatial input parameters. This check-function is called from one of the modelling routines in :mod:`empymod.model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- src, rec : list of floats or arrays Source/receiver dipole coordinates x, y, and z (m). nsrc, nrec : int Number of sources/receivers (-). verb : {0, 1, 2, 3, 4} Level of verbosity. Returns ------- off : array of floats Offsets angle : array of floats Angles """ global _min_off # Pre-allocate off and angle off = np.empty((nrec*nsrc,)) angle = np.empty((nrec*nsrc,)) # Coordinates # Loop over sources, append them one after another. 
for i in range(nsrc): xco = rec[0] - src[0][i] # X-coordinates [m] yco = rec[1] - src[1][i] # Y-coordinates [m] off[i*nrec:(i+1)*nrec] = np.sqrt(xco*xco + yco*yco) # Offset [m] angle[i*nrec:(i+1)*nrec] = np.arctan2(yco, xco) # Angle [rad] # Note: One could achieve a potential speed-up using np.unique to sort out # src-rec configurations that have the same offset and angle. Very unlikely # for real data. # Minimum offset to avoid singularities at off = 0 m. # => min_off can be set with utils.set_min angle[np.where(off < _min_off)] = np.nan off = _check_min(off, _min_off, 'Offsets', 'm', verb) return off, angle def get_azm_dip(inp, iz, ninpz, intpts, isdipole, strength, name, verb): r"""Get angles, interpolation weights and normalization weights. This check-function is called from one of the modelling routines in :mod:`empymod.model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- inp : list of floats or arrays Input coordinates (m): - [x0, x1, y0, y1, z0, z1] (bipole of finite length) - [x, y, z, azimuth, dip] (dipole, infinitesimal small) iz : int Index of current di-/bipole depth (-). ninpz : int Total number of di-/bipole depths (ninpz = 1 or npinz = nsrc) (-). intpts : int Number of integration points for bipole (-). isdipole : bool Boolean if inp is a dipole. strength : float, optional Source strength (A): - If 0, output is normalized to source and receiver of 1 m length, and source strength of 1 A. - If != 0, output is returned for given source and receiver length, and source strength. name : str, {'src', 'rec'} Pole-type. verb : {0, 1, 2, 3, 4} Level of verbosity. Returns ------- tout : list of floats or arrays Dipole coordinates x, y, and z (m). azm : float or array of floats Horizontal angle (azimuth). dip : float or array of floats Vertical angle (dip). g_w : float or array of floats Factors from Gaussian interpolation. intpts : int As input, checked. 
inp_w : float or array of floats Factors from source/receiver length and source strength. """ global _min_off # Get this di-/bipole if ninpz == 1: # If there is only one distinct depth, all at once tinp = inp else: # If there are several depths, we take the current one if isdipole: tinp = [np.atleast_1d(inp[0][iz]), np.atleast_1d(inp[1][iz]), np.atleast_1d(inp[2][iz]), np.atleast_1d(inp[3][iz]), np.atleast_1d(inp[4][iz])] else: tinp = [inp[0][iz], inp[1][iz], inp[2][iz], inp[3][iz], inp[4][iz], inp[5][iz]] # Check source strength variable strength = _check_var(strength, float, 0, 'strength', ()) # Dipole/Bipole specific if isdipole: # If input is a dipole, set intpts to 1 intpts = 1 # Check azm azm = _check_var(np.deg2rad(tinp[3]), float, 1, 'azimuth') # Check dip dip = _check_var(np.deg2rad(tinp[4]), float, 1, 'dip') # If dipole, g_w are ones g_w = np.ones(tinp[0].size) # If dipole, inp_w are ones, unless strength > 0 inp_w = np.ones(tinp[0].size) if name == 'src' and strength > 0: inp_w *= strength # Collect output tout = tinp else: # Get lengths in each direction dx = np.squeeze(tinp[1] - tinp[0]) dy = np.squeeze(tinp[3] - tinp[2]) dz = np.squeeze(tinp[5] - tinp[4]) # Length of bipole dl = np.atleast_1d(np.linalg.norm( np.array([dx, dy, dz], dtype=object), axis=0)) # Horizontal deviation from x-axis azm = np.atleast_1d(np.arctan2(dy, dx)) # Vertical deviation from xy-plane down dip = np.atleast_1d(np.pi/2-np.arccos(dz/dl)) # Check intpts intpts = _check_var(intpts, int, 0, 'intpts', ()) # Gauss quadrature if intpts > 2; else set to center of tinp if intpts > 2: # Calculate the dipole positions # Get integration positions and weights g_x, g_w = special.roots_legendre(intpts) g_x = np.outer(g_x, dl/2.0) # Adjust to tinp length g_w /= 2.0 # Adjust to tinp length (dl/2), normalize (1/dl) # Coordinate system is left-handed, positive z down # (East-North-Depth). 
xinp = tinp[0] + dx/2 + g_x*np.cos(dip)*np.cos(azm) yinp = tinp[2] + dy/2 + g_x*np.cos(dip)*np.sin(azm) zinp = tinp[4] + dz/2 + g_x*np.sin(dip) # Reduce zinp to one, if ninpz is 1 (as they are all the same then) if ninpz == 1: zinp = zinp[:, 0] else: # If intpts < 3: Calculate bipole at tinp-centre for dip/azm # Set intpts to 1 intpts = 1 # Get centre points xinp = np.array(tinp[0] + dx/2) yinp = np.array(tinp[2] + dy/2) zinp = np.array(tinp[4] + dz/2) # Gaussian weights in this case are ones g_w = np.array([1]) # Scaling inp_w = np.ones(dl.size) if strength > 0: # If strength > 0, we scale it by bipole-length inp_w *= dl if name == 'src': # If source, additionally by source strength inp_w *= strength # Collect output list; rounding coord. to same precision as min_off rndco = int(np.round(np.log10(1/_min_off))) tout = [np.round(xinp, rndco).ravel('F'), np.round(yinp, rndco).ravel('F'), np.round(zinp, rndco).ravel('F')] # Print spatial parameters if verb > 2: # Pole-type: src or rec if name == 'src': longname = ' Source(s) : ' else: longname = ' Receiver(s) : ' # Print dipole/bipole information if isdipole: print(f"{longname} {tout[0].size} dipole(s)") tname = ['x ', 'y ', 'z '] prntinp = tout else: print(f"{longname} {int(tout[0].size/intpts)} bipole(s)") tname = ['x_c', 'y_c', 'z_c'] if intpts == 1: print(" > intpts : 1 (as dipole)") prntinp = tout else: print(f" > intpts : {intpts}") prntinp = [np.atleast_1d(tinp[0])[0] + dx/2, np.atleast_1d(tinp[2])[0] + dy/2, np.atleast_1d(tinp[4])[0] + dz/2] # Print bipole length and strength _prnt_min_max_val(dl, " > length [m] : ", verb) print(f" > strength[A] : {_strvar(strength)}") # Print coordinates for i in range(3): text = " > " + tname[i] + " [m] : " _prnt_min_max_val(prntinp[i], text, verb) # Print angles _prnt_min_max_val(np.rad2deg(azm), " > azimuth [°] : ", verb) _prnt_min_max_val(np.rad2deg(dip), " > dip [°] : ", verb) return tout, azm, dip, g_w, intpts, inp_w def get_kwargs(names, defaults, kwargs): """Return 
wanted parameters, check remaining. 1. Extracts parameters `names` from `kwargs`, filling them with the `defaults`-value if it is not in `kwargs`. 2. Check remaining kwargs; - Raise an error if it is an unknown keyword; - Print warning if it is a keyword from another routine (verb>0). List of possible kwargs: - ALL functions: src, rec, res, aniso, epermH, epermV, mpermH, mpermV, verb - ONLY gpr: cf, gain - ONLY bipole: msrc, srcpts - ONLY dipole_k: freq, wavenumber - ONLY analytical: solution - ONLY bipole, loop: mrec, recpts, strength - ONLY bipole, dipole, loop, gpr: ht, htarg, ft, ftarg, xdirect, loop - ONLY bipole, dipole, loop, analytical: signal - ONLY dipole, analytical, gpr, dipole_k: ab - ONLY bipole, dipole, loop, gpr, dipole_k: depth - ONLY bipole, dipole, loop, analytical, gpr: freqtime Parameters ---------- names: list Names of wanted parameters as strings. defaults: list Default values of wanted parameters, in same order. kwargs : dict Passed-through kwargs. Returns ------ values : list Wanted parameters. """ # Known keys (excludes keys present in ALL routines). known_keys = set([ 'depth', 'ht', 'htarg', 'ft', 'ftarg', 'xdirect', 'loop', 'signal', 'ab', 'freqtime', 'freq', 'wavenumber', 'solution', 'cf', 'gain', 'msrc', 'srcpts', 'mrec', 'recpts', 'strength' ]) # Loop over wanted parameters. out = list() verb = 2 # get_kwargs-internal default. for i, name in enumerate(names): # Catch verb for warnings later on. if name == 'verb': verb = kwargs.get(name, defaults[i]) # Add this parameter to the list. out.append(kwargs.pop(name, defaults[i])) # Check remaining parameters. 
if kwargs: if not set(kwargs.keys()).issubset(known_keys): raise TypeError(f"Unexpected **kwargs: {kwargs}.") elif verb > 0: print(f"* WARNING :: Unused **kwargs: {kwargs}.") return out def printstartfinish(verb, inp=None, kcount=None): r"""Print start and finish with time measure and kernel count.""" if inp: if verb > 1: ttxt = str(timedelta(seconds=default_timer() - inp)) ktxt = ' ' if kcount: ktxt += str(kcount) + ' kernel call(s)' print(f"\n:: empymod END; runtime = {ttxt} ::{ktxt}\n") else: t0 = default_timer() if verb > 2: print(f"\n:: empymod START :: v{__version__}\n") return t0 def conv_warning(conv, targ, name, verb): r"""Print error if QWE/QUAD did not converge at least once.""" if verb > 0 and not conv: print(f"* WARNING :: {name}" "-quadrature did not converge at least once;\n " "=> desired `atol` and `rtol` might not be achieved.") # 3. Set/get min values def set_minimum(min_freq=None, min_time=None, min_off=None, min_res=None, min_angle=None): r""" Set minimum values of parameters. The given parameters are set to its minimum value if they are smaller. .. note:: set_minimum and get_minimum are derived after set_printoptions and get_printoptions from arrayprint.py in numpy. Parameters ---------- min_freq : float, optional Minimum frequency [Hz] (default 1e-20 Hz). min_time : float, optional Minimum time [s] (default 1e-20 s). min_off : float, optional Minimum offset [m] (default 1e-3 m). Also used to round src- & rec-coordinates. min_res : float, optional Minimum horizontal and vertical resistivity [Ohm.m] (default 1e-20). min_angle : float, optional Minimum angle [-] (default 1e-10). """ global _min_freq, _min_time, _min_off, _min_res, _min_angle if min_freq is not None: _min_freq = min_freq if min_time is not None: _min_time = min_time if min_off is not None: _min_off = min_off if min_res is not None: _min_res = min_res if min_angle is not None: _min_angle = min_angle def get_minimum(): r""" Return the current minimum values. .. 
note:: set_minimum and get_minimum are derived after set_printoptions and get_printoptions from arrayprint.py in numpy. Returns ------- min_vals : dict Dictionary of current minimum values with keys - min_freq : float - min_time : float - min_off : float - min_res : float - min_angle : float For a full description of these options, see `set_minimum`. """ d = dict(min_freq=_min_freq, min_time=_min_time, min_off=_min_off, min_res=_min_res, min_angle=_min_angle) return d # 4. Internal utilities def _check_shape(var, name, shape, shape2=None): r"""Check that <var> has shape <shape>; if false raise ValueError(name)""" varshape = np.shape(var) if shape != varshape: if shape2: if shape2 != varshape: raise ValueError(f"Parameter {name} has wrong shape! : " f"{varshape} instead of {shape} or {shape2}.") else: raise ValueError(f"Parameter {name} has wrong shape! : " f"{varshape} instead of {shape}.") def _check_var(var, dtype, ndmin, name, shape=None, shape2=None): r"""Return variable as array of dtype, ndmin; shape-checked.""" var = np.array(var, dtype=dtype, copy=True, ndmin=ndmin) if shape: _check_shape(var, name, shape, shape2) return var def _strvar(a, prec='{:G}'): r"""Return variable as a string to print, with given precision.""" return ' '.join([prec.format(i) for i in np.atleast_1d(a)]) def _prnt_min_max_val(var, text, verb): r"""Print variable; if more than three, just min/max, unless verb > 3.""" if var.size > 3: print(f"{text} {_strvar(var.min())} - {_strvar(var.max())} " f": {_strvar(var.size)} [min-max; #]") if verb > 3: print(f" : {_strvar(var)}") else: print(f"{text} {_strvar(np.atleast_1d(var))}") def _check_min(par, minval, name, unit, verb): r"""Check minimum value of parameter.""" scalar = False if par.shape == (): scalar = True par = np.atleast_1d(par) if minval is not None: ipar = np.where(par < minval) par[ipar] = minval if verb > 0 and np.size(ipar) != 0: print(f"* WARNING :: {name} < {str(minval)} {unit}" f" are set to {minval} {unit}!") if scalar: 
return np.squeeze(par) else: return par # 5. Report class Report(ScoobyReport): r"""Print date, time, and version information. Use `scooby` to print date, time, and package version information in any environment (Jupyter notebook, IPython console, Python console, QT console), either as html-table (notebook) or as plain text (anywhere). Always shown are the OS, number of CPU(s), `numpy`, `scipy`, `numba`, `empymod`, `sys.version`, and time/date. Additionally shown are, if they can be imported, `IPython`, and `matplotlib`. It also shows MKL information, if available. All modules provided in `add_pckg` are also shown. .. note:: The package `scooby` has to be installed in order to use `Report`: ``pip install scooby``. Parameters ---------- add_pckg : packages, optional Package or list of packages to add to output information (must be imported beforehand). ncol : int, optional Number of package-columns in html table (no effect in text-version); Defaults to 3. text_width : int, optional The text width for non-HTML display modes sort : bool, optional Sort the packages when the report is shown Examples -------- >>> import pytest >>> import dateutil >>> from empymod import Report >>> Report() # Default values >>> Report(pytest) # Provide additional package >>> Report([pytest, dateutil], ncol=5) # Set nr of columns """ def __init__(self, add_pckg=None, ncol=3, text_width=80, sort=False): """Initiate a scooby.Report instance.""" # Mandatory packages. core = ['numpy', 'scipy', 'numba', 'empymod'] # Optional packages. optional = ['IPython', 'matplotlib'] super().__init__(additional=add_pckg, core=core, optional=optional, ncol=ncol, text_width=text_width, sort=sort)
{ "content_hash": "5096450df1cc35d0f0addda4c5efc486", "timestamp": "", "source": "github", "line_count": 2034, "max_line_length": 79, "avg_line_length": 32.317109144542776, "alnum_prop": 0.5438364291908174, "repo_name": "prisae/empymod", "id": "ddc8050b48961d0ec67aef1ef3f0fca5d2ee28f5", "size": "65735", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "empymod/utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "650728" }, { "name": "Shell", "bytes": "689" } ], "symlink_target": "" }
# Future-imports used project-wide for Python-2/3 compatibility; several of
# these (generators, nested_scopes, with_statement) have long been no-ops
# and are presumably kept for uniformity across the codebase.
from __future__ import (absolute_import, division, generators, nested_scopes,
                        print_function, unicode_literals, with_statement)

# Version string for this package.
VERSION = '0.0.48'
{ "content_hash": "62063731db9b4cf3ca4da925ed584e40", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 93, "avg_line_length": 34.6, "alnum_prop": 0.6473988439306358, "repo_name": "kslundberg/pants", "id": "573340e7d5bc45b9b2cb098e66f22c0c5b72a791", "size": "320", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/python/pants/version.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "767" }, { "name": "CSS", "bytes": "11442" }, { "name": "GAP", "bytes": "2459" }, { "name": "Go", "bytes": "1437" }, { "name": "HTML", "bytes": "70131" }, { "name": "Java", "bytes": "302900" }, { "name": "JavaScript", "bytes": "25075" }, { "name": "Protocol Buffer", "bytes": "3783" }, { "name": "Python", "bytes": "3885765" }, { "name": "Scala", "bytes": "84093" }, { "name": "Shell", "bytes": "49520" }, { "name": "Thrift", "bytes": "2583" } ], "symlink_target": "" }
import mock
import netaddr

from neutron.objects import port_forwarding
from neutron.objects import router
from neutron.tests import tools
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class PortForwardingObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):
    """Interface-level tests for the PortForwarding versioned object."""

    _test_class = port_forwarding.PortForwarding

    def setUp(self):
        """Patch FloatingIP.get_object to serve randomized db rows."""
        super(PortForwardingObjectTestCase, self).setUp()
        # Random FloatingIP db fields; drop the address so a fresh one can
        # be generated per lookup below.
        self.fip_db_fields = self.get_random_db_fields(router.FloatingIP)
        del self.fip_db_fields['floating_ip_address']

        def random_generate_fip_obj(db_fields, **floatingip):
            # Reuse the cached fields when queried again with the same id;
            # otherwise adopt the requested id and a fresh random IPv4.
            if db_fields.get(
                    'id', None) and floatingip.get(
                    'id', None) and db_fields.get('id') == floatingip.get(
                    'id'):
                return db_fields
            db_fields['id'] = floatingip.get('id', None)
            db_fields['floating_ip_address'] = tools.get_random_ip_address(
                version=4)
            return self.fip_db_fields

        # Every FloatingIP.get_object call returns a db_model built from
        # the (possibly regenerated) random fields above.
        self.mock_fip_obj = mock.patch.object(
            router.FloatingIP, 'get_object',
            side_effect=lambda _, **y: router.FloatingIP.db_model(
                **random_generate_fip_obj(self.fip_db_fields, **y))).start()


class PortForwardingDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                     testlib_api.SqlTestCase):
    """DB-backed tests for the PortForwarding versioned object."""

    _test_class = port_forwarding.PortForwarding

    def setUp(self):
        """Create real FIP/port ids and choose a filterable field."""
        super(PortForwardingDbObjectTestCase, self).setUp()
        self.update_obj_fields(
            {'floatingip_id':
                lambda: self._create_test_fip_id_for_port_forwarding(),
             'internal_port_id': lambda: self._create_test_port_id()})
        # 'portforwardings' table will store the 'internal_ip_address' and
        # 'internal_port' as a single 'socket' column.
        # Port forwarding object accepts 'internal_ip_address' and
        # 'internal_port', but can not filter the records in db, so the
        # valid filters can not contain them.
        not_supported_filter_fields = ['internal_ip_address', 'internal_port']
        invalid_fields = set(
            self._test_class.synthetic_fields).union(
            set(not_supported_filter_fields))
        # Any remaining real (db-backed, filterable) field will do for the
        # base class's filter tests.
        valid_field = [f for f in self._test_class.fields
                       if f not in invalid_fields][0]
        self.valid_field_filter = {
            valid_field: self.obj_fields[-1][valid_field]}

    def _create_test_fip_id_for_port_forwarding(self):
        """Create and persist a FloatingIP; return its id."""
        fake_fip = '172.23.3.0'
        ext_net_id = self._create_external_network_id()
        router_id = self._create_test_router_id()
        values = {
            'floating_ip_address': netaddr.IPAddress(fake_fip),
            'floating_network_id': ext_net_id,
            'floating_port_id': self._create_test_port_id(
                network_id=ext_net_id),
            'router_id': router_id,
        }
        fip_obj = router.FloatingIP(self.context, **values)
        fip_obj.create()
        return fip_obj.id

    def test_db_obj(self):
        """Check create/update/delete round-trips against the db model."""
        # The reason for rewriting this test is:
        # 1. Currently, the existing test_db_obj test in
        # obj_test_base.BaseDbObjectTestCase is not suitable for the case,
        # for example, the db model is not the same with obj fields
        # definition.
        # 2. For port forwarding, the db model will store and accept
        # 'socket', but the obj fields just only support accepting the
        # parameters generate 'socket', such as 'internal_ip_address' and
        # 'internal_port'.
        obj = self._make_object(self.obj_fields[0])
        self.assertIsNone(obj.db_obj)
        obj.create()
        self.assertIsNotNone(obj.db_obj)
        # Make sure the created obj socket field is correct.
        # socket is stored as "ip:port"; compare each half to the object's
        # separate fields.
        created_socket = obj.db_obj.socket.split(":")
        self.assertEqual(created_socket[0], str(obj.internal_ip_address))
        self.assertEqual(created_socket[1], str(obj.internal_port))

        fields_to_update = self.get_updatable_fields(self.obj_fields[1])
        if fields_to_update:
            # NOTE(review): old_fields is collected here but never read
            # afterwards — looks like leftover scaffolding; confirm before
            # removing.
            old_fields = {}
            for key, val in fields_to_update.items():
                # Translate the OVO field name to its db column name.
                db_model_attr = (
                    obj.fields_need_translation.get(key, key))
                old_fields[db_model_attr] = obj.db_obj[
                    db_model_attr] if hasattr(
                    obj.db_obj, db_model_attr) else getattr(
                    obj, db_model_attr)
                setattr(obj, key, val)
            obj.update()
            self.assertIsNotNone(obj.db_obj)
            # Make sure the updated obj socket field is correct.
            updated_socket = obj.db_obj.socket.split(":")
            self.assertEqual(updated_socket[0],
                             str(self.obj_fields[1]['internal_ip_address']))
            self.assertEqual(updated_socket[1],
                             str(self.obj_fields[1]['internal_port']))
            # Then check all update fields had been updated.
            for k, v in obj.modify_fields_to_db(fields_to_update).items():
                self.assertEqual(v, obj.db_obj[k],
                                 '%s attribute differs' % k)

        obj.delete()
        self.assertIsNone(obj.db_obj)

    def test_get_objects_queries_constant(self):
        # NOTE(bzhao) Port Forwarding uses query FLoatingIP for injecting
        # floating_ip_address and router_id, not depends on relationship,
        # so it will cost extra SQL query each time for finding the
        # associated Floating IP by floatingip_id each time(or each
        # Port Forwarding Object). Rework this if this customized OVO
        # needs to be changed.
        pass
{ "content_hash": "915b84445839c20e12b4567dda89fb84", "timestamp": "", "source": "github", "line_count": 130, "max_line_length": 78, "avg_line_length": 44.12307692307692, "alnum_prop": 0.6005927475592747, "repo_name": "noironetworks/neutron", "id": "0500fbd565c50cc7e91cbc121ee1e0b3c8188a3a", "size": "6354", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "neutron/tests/unit/objects/test_port_forwarding.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "1047" }, { "name": "Python", "bytes": "11420614" }, { "name": "Shell", "bytes": "38791" } ], "symlink_target": "" }