text
stringlengths
4
1.02M
meta
dict
import json

from django.conf import settings


class SVGReaderError(Exception):
    """Raised when the icon source is misconfigured or an icon is missing."""
    pass


class BaseReader(object):
    """Base reader class, not for direct use.

    Subclass this to have your own implementation; the class to use can be
    defined in the settings with SVG_ICONS_READER_CLASS.
    """

    def __init__(self):
        # Use a default of None here: a bare getattr(settings, name) raises
        # AttributeError when the setting is absent, which made the friendly
        # SVGReaderError below unreachable for the missing-setting case.
        self.source_file = getattr(settings, 'SVG_ICONS_SOURCE_FILE', None)
        if not self.source_file:
            raise SVGReaderError(
                "SVG_ICONS_SOURCE_FILE needs to be defined for icons to work.")
        self.svg_path_data = self.read_source_file()

    def read_source_file(self):
        """Read the source file in memory.

        Implement this when subclassing the Reader to have it read the
        preferred format. The implementation should return a dict with the
        icon name as key and the svg path as a list data as value.

        ..:example::

            {
                'icon1': [
                    "M365.339 474.828c-19.319-12.616-42.222-18.062.....",
                    "M365.339 474.828c-19.319-12.616-42.222-18.062.....",
                },
                'icon2': [
                    "M365.339 474.828c-19.319-12.616-42.222-18.062.....",
                },
            }
        """
        raise NotImplementedError

    def get_svg_paths(self, icon_name):
        """Return the path data of the requested icon.

        Raises SVGReaderError when the icon name is unknown (or maps to
        empty path data) in the loaded source file.
        """
        path_data = self.svg_path_data.get(icon_name)
        if not path_data:
            raise SVGReaderError(
                "No path data found for icon {}".format(icon_name))
        return path_data
{ "content_hash": "0717176f315795acc1e1b41f2fde7f0a", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 79, "avg_line_length": 29.472727272727273, "alnum_prop": 0.5650832819247378, "repo_name": "mikedingjan/django-svg-icons", "id": "311be7b5cd08070be51e05930e4feb7e638bdcdf", "size": "1621", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/svg_icons/readers/base.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "214" }, { "name": "Makefile", "bytes": "333" }, { "name": "Python", "bytes": "5510" } ], "symlink_target": "" }
"""A PHP devappserver2 runtime.""" import base64 import cStringIO import httplib import logging import os import subprocess import sys import time import urllib import google from google.appengine.api import appinfo from google.appengine.tools.devappserver2 import environ_utils from google.appengine.tools.devappserver2 import http_runtime_constants from google.appengine.tools.devappserver2 import php from google.appengine.tools.devappserver2 import request_rewriter from google.appengine.tools.devappserver2 import runtime_config_pb2 from google.appengine.tools.devappserver2 import safe_subprocess from google.appengine.tools.devappserver2 import wsgi_server SDK_PATH = os.path.abspath( os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'php/sdk')) if not os.path.exists(SDK_PATH): SDK_PATH = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'php/sdk')) SETUP_PHP_PATH = os.path.join(os.path.dirname(php.__file__), 'setup.php') class PHPRuntime(object): """A WSGI application that runs PHP scripts using the PHP CGI binary.""" def __init__(self, config): logging.debug('Initializing runtime with %s', config) self.config = config if appinfo.MODULE_SEPARATOR not in config.version_id: module_id = appinfo.DEFAULT_MODULE version_id = config.version_id else: module_id, version_id = config.version_id.split(appinfo.MODULE_SEPARATOR) self.environ_template = { 'APPLICATION_ID': str(config.app_id), 'CURRENT_MODULE_ID': module_id, 'CURRENT_VERSION_ID': version_id, 'DATACENTER': str(config.datacenter), 'INSTANCE_ID': str(config.instance_id), 'APPENGINE_RUNTIME': 'php', 'AUTH_DOMAIN': str(config.auth_domain), 'HTTPS': 'off', # By default php-cgi does not allow .php files to be run directly so # REDIRECT_STATUS must be set. 
See: # http://php.net/manual/en/security.cgi-bin.force-redirect.php 'REDIRECT_STATUS': '1', 'REMOTE_API_HOST': str(config.api_host), 'REMOTE_API_PORT': str(config.api_port), 'SERVER_SOFTWARE': http_runtime_constants.SERVER_SOFTWARE, 'TZ': 'UTC', } self.environ_template.update((env.key, env.value) for env in config.environ) def __call__(self, environ, start_response): """Handles an HTTP request for the runtime using a PHP executable. Args: environ: An environ dict for the request as defined in PEP-333. start_response: A function with semantics defined in PEP-333. Returns: An iterable over strings containing the body of the HTTP response. """ user_environ = self.environ_template.copy() environ_utils.propagate_environs(environ, user_environ) user_environ['REQUEST_METHOD'] = environ.get('REQUEST_METHOD', 'GET') user_environ['PATH_INFO'] = environ['PATH_INFO'] user_environ['QUERY_STRING'] = environ['QUERY_STRING'] # Construct the partial URL that PHP expects for REQUEST_URI # (http://php.net/manual/en/reserved.variables.server.php) using part of # the process described in PEP-333 # (http://www.python.org/dev/peps/pep-0333/#url-reconstruction). user_environ['REQUEST_URI'] = urllib.quote(user_environ['PATH_INFO']) if user_environ['QUERY_STRING']: user_environ['REQUEST_URI'] += '?' + user_environ['QUERY_STRING'] # Modify the SCRIPT_FILENAME to specify the setup script that readies the # PHP environment. Put the user script in REAL_SCRIPT_FILENAME. user_environ['REAL_SCRIPT_FILENAME'] = os.path.normpath( os.path.join(self.config.application_root, environ[http_runtime_constants.SCRIPT_HEADER])) user_environ['SCRIPT_FILENAME'] = SETUP_PHP_PATH user_environ['REMOTE_REQUEST_ID'] = environ[ http_runtime_constants.REQUEST_ID_ENVIRON] # Pass the APPLICATION_ROOT so we can use it in the setup script. We will # remove it from the environment before we execute the user script. 
user_environ['APPLICATION_ROOT'] = self.config.application_root if 'CONTENT_TYPE' in environ: user_environ['CONTENT_TYPE'] = environ['CONTENT_TYPE'] user_environ['HTTP_CONTENT_TYPE'] = environ['CONTENT_TYPE'] if 'CONTENT_LENGTH' in environ: user_environ['CONTENT_LENGTH'] = environ['CONTENT_LENGTH'] user_environ['HTTP_CONTENT_LENGTH'] = environ['CONTENT_LENGTH'] content = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])) else: content = '' # On Windows, in order to run a side-by-side assembly the specified env # must include a valid SystemRoot. if 'SYSTEMROOT' in os.environ: user_environ['SYSTEMROOT'] = os.environ['SYSTEMROOT'] # See http://www.php.net/manual/en/ini.core.php#ini.include-path. include_paths = [self.config.application_root, SDK_PATH] if sys.platform == 'win32': # See https://bugs.php.net/bug.php?id=46034 for quoting requirements. include_path = 'include_path="%s"' % ';'.join(include_paths) else: include_path = 'include_path=%s' % ':'.join(include_paths) args = [self.config.php_config.php_executable_path, '-d', include_path] # Load php.ini from application's root. 
args.extend(['-c', self.config.application_root]) if self.config.php_config.enable_debugger: args.extend(['-d', 'xdebug.remote_enable="1"']) user_environ['XDEBUG_CONFIG'] = os.environ.get('XDEBUG_CONFIG', '') request_type = environ.pop(http_runtime_constants.REQUEST_TYPE_HEADER, None) if request_type == 'interactive': args.extend(['-d', 'html_errors="0"']) user_environ[http_runtime_constants.REQUEST_TYPE_HEADER] = request_type try: p = safe_subprocess.start_process(args, input_string=content, env=user_environ, cwd=self.config.application_root, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() except Exception as e: logging.exception('Failure to start PHP with: %s', args) start_response('500 Internal Server Error', [(http_runtime_constants.ERROR_CODE_HEADER, '1')]) return ['Failure to start the PHP subprocess with %r:\n%s' % (args, e)] if p.returncode: if request_type == 'interactive': start_response('200 OK', [('Content-Type', 'text/plain')]) message = httplib.HTTPMessage(cStringIO.StringIO(stdout)) return [message.fp.read()] else: logging.error('php failure (%r) with:\nstdout:\n%sstderr:\n%s', p.returncode, stdout, stderr) start_response('500 Internal Server Error', [(http_runtime_constants.ERROR_CODE_HEADER, '1')]) message = httplib.HTTPMessage(cStringIO.StringIO(stdout)) return [message.fp.read()] message = httplib.HTTPMessage(cStringIO.StringIO(stdout)) if 'Status' in message: status = message['Status'] del message['Status'] else: status = '200 OK' # Ensures that we avoid merging repeat headers into a single header, # allowing use of multiple Set-Cookie headers. 
headers = [] for name in message: for value in message.getheaders(name): headers.append((name, value)) start_response(status, headers) return [message.fp.read()] def main(): config = runtime_config_pb2.Config() config.ParseFromString(base64.b64decode(sys.stdin.read())) server = wsgi_server.WsgiServer( ('localhost', 0), request_rewriter.runtime_rewriter_middleware(PHPRuntime(config))) server.start() print server.port sys.stdout.close() sys.stdout = sys.stderr try: while True: time.sleep(1) except KeyboardInterrupt: pass finally: server.quit() if __name__ == '__main__': main()
{ "content_hash": "6e8eee2cc0338ce891b8fd5ccbcc7549", "timestamp": "", "source": "github", "line_count": 209, "max_line_length": 80, "avg_line_length": 38.33971291866029, "alnum_prop": 0.65443654062149, "repo_name": "levibostian/myBlanky", "id": "4d7bda19803d8e8420b43b51f20e6c288efc0ae7", "size": "8614", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "googleAppEngine/google/appengine/tools/devappserver2/php/runtime.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "29352" }, { "name": "JavaScript", "bytes": "305206" }, { "name": "PHP", "bytes": "4350" }, { "name": "Python", "bytes": "11679977" } ], "symlink_target": "" }
"""Tests for exploration domain objects and methods defined on them.""" import os from core.domain import exp_domain from core.domain import exp_services from core.domain import param_domain from core.tests import test_utils import feconf import utils # Dictionary-like data structures within sample YAML must be formatted # alphabetically to match string equivalence with the YAML generation # methods tested below. # # If evaluating differences in YAML, conversion to dict form via # utils.dict_from_yaml can isolate differences quickly. SAMPLE_YAML_CONTENT = ("""author_notes: '' blurb: '' category: Category init_state_name: %s language_code: en objective: '' param_changes: [] param_specs: {} schema_version: %d skin_customizations: panels_contents: bottom: [] states: %s: classifier_model_id: null content: audio_translations: {} html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: {} default_outcome: dest: %s feedback: [] param_changes: [] fallbacks: [] hints: [] id: null solution: {} param_changes: [] New state: classifier_model_id: null content: audio_translations: {} html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: {} default_outcome: dest: New state feedback: [] param_changes: [] fallbacks: - outcome: dest: New state feedback: [] param_changes: [] trigger: customization_args: num_submits: value: 42 trigger_type: NthResubmission hints: [] id: null solution: {} param_changes: [] states_schema_version: %d tags: [] title: Title """) % ( feconf.DEFAULT_INIT_STATE_NAME, exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION, feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME, feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION) SAMPLE_UNTITLED_YAML_CONTENT = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: %s language_code: en objective: '' param_changes: [] param_specs: {} schema_version: %d skin_customizations: panels_contents: {} states: %s: content: 
- type: text value: '' interaction: answer_groups: [] customization_args: {} default_outcome: dest: %s feedback: [] param_changes: [] fallbacks: [] id: null param_changes: [] New state: content: - type: text value: '' interaction: answer_groups: [] customization_args: {} default_outcome: dest: New state feedback: [] param_changes: [] fallbacks: - outcome: dest: New state feedback: [] param_changes: [] trigger: customization_args: num_submits: value: 42 trigger_type: NthResubmission id: null param_changes: [] states_schema_version: %d tags: [] """) % ( feconf.DEFAULT_INIT_STATE_NAME, exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION, feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME, feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION) SAMPLE_YAML_CONTENT_WITH_GADGETS = ("""author_notes: '' blurb: '' category: Category init_state_name: %s language_code: en objective: '' param_changes: [] param_specs: {} schema_version: %d skin_customizations: panels_contents: bottom: - customization_args: adviceObjects: value: - adviceTitle: b adviceHtml: <p>c</p> gadget_type: TestGadget gadget_name: ATestGadget visible_in_states: - New state - Second state states: %s: classifier_model_id: null content: audio_translations: {} html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: %s feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] New state: classifier_model_id: null content: audio_translations: {} html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: New state feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] Second state: classifier_model_id: null content: audio_translations: {} html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] 
customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: Second state feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] states_schema_version: %d tags: [] title: Title """) % ( feconf.DEFAULT_INIT_STATE_NAME, exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION, feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME, feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION) TEST_GADGETS = { 'TestGadget': { 'dir': os.path.join(feconf.GADGETS_DIR, 'TestGadget') } } TEST_GADGET_CUSTOMIZATION_ARGS = { 'adviceObjects': { 'value': [{ 'adviceTitle': 'b', 'adviceHtml': '<p>c</p>' }] } } TEST_GADGET_DICT = { 'gadget_type': 'TestGadget', 'gadget_name': 'ATestGadget', 'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS, 'visible_in_states': ['First state'] } class ExplorationDomainUnitTests(test_utils.GenericTestBase): """Test the exploration domain object.""" # TODO(bhenning): The validation tests below should be split into separate # unit tests. Also, all validation errors should be covered in the tests. def test_validation(self): """Test validation of explorations.""" exploration = exp_domain.Exploration.create_default_exploration('eid') exploration.init_state_name = '' exploration.states = {} exploration.title = 'Hello #' self._assert_validation_error(exploration, 'Invalid character #') exploration.title = 'Title' exploration.category = 'Category' # Note: If '/' ever becomes a valid state name, ensure that the rule # editor frontend tenplate is fixed -- it currently uses '/' as a # sentinel for an invalid state name. bad_state = exp_domain.State.create_default_state('/') exploration.states = {'/': bad_state} self._assert_validation_error( exploration, 'Invalid character / in a state name') new_state = exp_domain.State.create_default_state('ABC') new_state.update_interaction_id('TextInput') # The 'states' property must be a non-empty dict of states. 
exploration.states = {} self._assert_validation_error( exploration, 'exploration has no states') exploration.states = {'A string #': new_state} self._assert_validation_error( exploration, 'Invalid character # in a state name') exploration.states = {'A string _': new_state} self._assert_validation_error( exploration, 'Invalid character _ in a state name') exploration.states = {'ABC': new_state} self._assert_validation_error( exploration, 'has no initial state name') exploration.init_state_name = 'initname' self._assert_validation_error( exploration, r'There is no state in \[\'ABC\'\] corresponding to ' 'the exploration\'s initial state name initname.') # Test whether a default outcome to a non-existing state is invalid. exploration.states = {exploration.init_state_name: new_state} self._assert_validation_error( exploration, 'destination ABC is not a valid') # Restore a valid exploration. init_state = exploration.states[exploration.init_state_name] default_outcome = init_state.interaction.default_outcome default_outcome.dest = exploration.init_state_name exploration.validate() # Ensure an answer group with two classifier rules is invalid init_state.interaction.answer_groups.append( exp_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': exploration.init_state_name, 'feedback': ['Feedback'], 'param_changes': [], }, 'rule_specs': [{ 'inputs': { 'training_data': ['Test'] }, 'rule_type': 'FuzzyMatches' }, { 'inputs': { 'training_data': ['Test'] }, 'rule_type': 'FuzzyMatches' }], 'correct': False, }) ) self._assert_validation_error( exploration, 'AnswerGroups can only have one classifier rule.') # Restore a valid exploration. init_state.interaction.answer_groups.pop() exploration.validate() # Ensure an invalid destination can also be detected for answer groups. # Note: The state must keep its default_outcome, otherwise it will # trigger a validation error for non-terminal states needing to have a # default outcome. 
To validate the outcome of the answer group, this # default outcome must point to a valid state. init_state = exploration.states[exploration.init_state_name] default_outcome = init_state.interaction.default_outcome default_outcome.dest = exploration.init_state_name init_state.interaction.answer_groups.append( exp_domain.AnswerGroup.from_dict({ 'outcome': { 'dest': exploration.init_state_name, 'feedback': ['Feedback'], 'param_changes': [], }, 'rule_specs': [{ 'inputs': { 'x': 'Test' }, 'rule_type': 'Contains' }], 'correct': False, }) ) exploration.validate() interaction = init_state.interaction answer_groups = interaction.answer_groups answer_group = answer_groups[0] answer_group.outcome.dest = 'DEF' self._assert_validation_error( exploration, 'destination DEF is not a valid') # Restore a valid exploration. exploration.states[exploration.init_state_name].update_interaction_id( 'TextInput') answer_group.outcome.dest = exploration.init_state_name exploration.validate() # Validate RuleSpec. rule_spec = answer_group.rule_specs[0] rule_spec.inputs = {} self._assert_validation_error( exploration, 'RuleSpec \'Contains\' is missing inputs') rule_spec.inputs = 'Inputs string' self._assert_validation_error( exploration, 'Expected inputs to be a dict') rule_spec.inputs = {'x': 'Test'} rule_spec.rule_type = 'FakeRuleType' self._assert_validation_error(exploration, 'Unrecognized rule type') rule_spec.inputs = {'x': 15} rule_spec.rule_type = 'Contains' with self.assertRaisesRegexp( Exception, 'Expected unicode string, received 15' ): exploration.validate() rule_spec.inputs = {'x': '{{ExampleParam}}'} self._assert_validation_error( exploration, 'RuleSpec \'Contains\' has an input with name \'x\' which refers ' 'to an unknown parameter within the exploration: ExampleParam') # Restore a valid exploration. exploration.param_specs['ExampleParam'] = param_domain.ParamSpec( 'UnicodeString') exploration.validate() # Validate Outcome. 
outcome = answer_group.outcome destination = exploration.init_state_name outcome.dest = None self._assert_validation_error( exploration, 'Every outcome should have a destination.') # Try setting the outcome destination to something other than a string. outcome.dest = 15 self._assert_validation_error( exploration, 'Expected outcome dest to be a string') outcome.dest = destination outcome.feedback = 'Feedback' self._assert_validation_error( exploration, 'Expected outcome feedback to be a list') outcome.feedback = [15] self._assert_validation_error( exploration, 'Expected outcome feedback item to be a string') outcome.feedback = ['Feedback'] exploration.validate() outcome.param_changes = 'Changes' self._assert_validation_error( exploration, 'Expected outcome param_changes to be a list') outcome.param_changes = [] exploration.validate() # Validate InteractionInstance. interaction.id = 15 self._assert_validation_error( exploration, 'Expected interaction id to be a string') interaction.id = 'SomeInteractionTypeThatDoesNotExist' self._assert_validation_error(exploration, 'Invalid interaction id') interaction.id = 'TextInput' exploration.validate() interaction.customization_args = [] self._assert_validation_error( exploration, 'Expected customization args to be a dict') interaction.customization_args = {15: ''} self._assert_validation_error( exploration, 'Invalid customization arg name') interaction.customization_args = {'placeholder': ''} exploration.validate() interaction.answer_groups = {} self._assert_validation_error( exploration, 'Expected answer groups to be a list') interaction.answer_groups = answer_groups interaction.id = 'EndExploration' self._assert_validation_error( exploration, 'Terminal interactions must not have a default outcome.') interaction.id = 'TextInput' interaction.default_outcome = None self._assert_validation_error( exploration, 'Non-terminal interactions must have a default outcome.') interaction.id = 'EndExploration' 
self._assert_validation_error( exploration, 'Terminal interactions must not have any answer groups.') # A terminal interaction without a default outcome or answer group is # valid. This resets the exploration back to a valid state. interaction.answer_groups = [] exploration.validate() interaction.fallbacks = {} self._assert_validation_error( exploration, 'Expected fallbacks to be a list') # Restore a valid exploration. interaction.id = 'TextInput' interaction.answer_groups = answer_groups interaction.default_outcome = default_outcome interaction.fallbacks = [] exploration.validate() interaction.hints = {} self._assert_validation_error( exploration, 'Expected hints to be a list') # Validate AnswerGroup. answer_group.rule_specs = {} self._assert_validation_error( exploration, 'Expected answer group rules to be a list') answer_group.rule_specs = [] self._assert_validation_error( exploration, 'There must be at least one rule for each answer group.') exploration.states = { exploration.init_state_name: exp_domain.State.create_default_state( exploration.init_state_name) } exploration.states[exploration.init_state_name].update_interaction_id( 'TextInput') exploration.validate() exploration.language_code = 'fake_code' self._assert_validation_error(exploration, 'Invalid language_code') exploration.language_code = 'English' self._assert_validation_error(exploration, 'Invalid language_code') exploration.language_code = 'en' exploration.validate() exploration.param_specs = 'A string' self._assert_validation_error(exploration, 'param_specs to be a dict') exploration.param_specs = { '@': param_domain.ParamSpec.from_dict({ 'obj_type': 'UnicodeString' }) } self._assert_validation_error( exploration, 'Only parameter names with characters') exploration.param_specs = { 'notAParamSpec': param_domain.ParamSpec.from_dict( {'obj_type': 'UnicodeString'}) } exploration.validate() def test_fallbacks_validation(self): """Test validation of state fallbacks.""" exploration = 
exp_domain.Exploration.create_default_exploration('eid') exploration.objective = 'Objective' init_state = exploration.states[exploration.init_state_name] init_state.update_interaction_id('TextInput') exploration.validate() base_outcome = { 'dest': exploration.init_state_name, 'feedback': [], 'param_changes': [], } init_state.update_interaction_fallbacks([{ 'trigger': { 'trigger_type': 'FakeTriggerName', 'customization_args': { 'num_submits': { 'value': 42, }, }, }, 'outcome': base_outcome, }]) self._assert_validation_error(exploration, 'Unknown trigger type') with self.assertRaises(KeyError): init_state.update_interaction_fallbacks([{ 'trigger': { 'trigger_type': 'NthResubmission', 'customization_args': { 'num_submits': { 'value': 42, }, }, }, 'outcome': {}, }]) init_state.update_interaction_fallbacks([{ 'trigger': { 'trigger_type': 'NthResubmission', 'customization_args': {}, }, 'outcome': base_outcome, }]) # Default values for the customization args will be added silently. exploration.validate() self.assertEqual(len(init_state.interaction.fallbacks), 1) self.assertEqual( init_state.interaction.fallbacks[0].trigger.customization_args, { 'num_submits': { 'value': 3, } }) init_state.update_interaction_fallbacks([{ 'trigger': { 'trigger_type': 'NthResubmission', 'customization_args': { 'num_submits': { 'value': 42, }, 'bad_key_that_will_get_stripped_silently': { 'value': 'unused_value', } }, }, 'outcome': base_outcome, }]) # Unused customization arg keys will be stripped silently. 
exploration.validate() self.assertEqual(len(init_state.interaction.fallbacks), 1) self.assertEqual( init_state.interaction.fallbacks[0].trigger.customization_args, { 'num_submits': { 'value': 42, } }) init_state.update_interaction_fallbacks([{ 'trigger': { 'trigger_type': 'NthResubmission', 'customization_args': { 'num_submits': { 'value': 2, }, }, }, 'outcome': base_outcome, }]) exploration.validate() def test_hints_validation(self): """Test validation of state hints.""" exploration = exp_domain.Exploration.create_default_exploration('eid') exploration.objective = 'Objective' init_state = exploration.states[exploration.init_state_name] init_state.update_interaction_id('TextInput') exploration.validate() init_state.update_interaction_hints([{ 'hint_text': 'hint one', }]) solution = { 'answer_is_exclusive': False, 'correct_answer': 'helloworld!', 'explanation': 'hello_world is a string', } init_state.interaction.solution = solution exploration.validate() # Add hint and delete hint init_state.add_hint('new hint') self.assertEquals( init_state.interaction.hints[1].hint_text, 'new hint') init_state.add_hint('hint three') init_state.delete_hint(1) self.assertEquals( len(init_state.interaction.hints), 2) exploration.validate() def test_solution_validation(self): """Test validation of state solution.""" exploration = exp_domain.Exploration.create_default_exploration('eid') exploration.objective = 'Objective' init_state = exploration.states[exploration.init_state_name] init_state.update_interaction_id('TextInput') exploration.validate() init_state.add_hint('hint #1') solution = { 'answer_is_exclusive': False, 'correct_answer': [0, 0], 'explanation': 'hello_world is a string', } init_state.interaction.solution = solution # Object type of answer must match that of correct_answer with self.assertRaises(AssertionError): exploration.validate() solution = { 'answer_is_exclusive': False, 'correct_answer': 'hello_world!', 'explanation': 'hello_world is a string', } 
init_state.interaction.solution = solution exploration.validate() def test_tag_validation(self): """Test validation of exploration tags.""" exploration = exp_domain.Exploration.create_default_exploration('eid') exploration.objective = 'Objective' init_state = exploration.states[exploration.init_state_name] init_state.update_interaction_id('EndExploration') init_state.interaction.default_outcome = None exploration.validate() exploration.tags = 'this should be a list' self._assert_validation_error( exploration, 'Expected \'tags\' to be a list') exploration.tags = [123] self._assert_validation_error(exploration, 'to be a string') exploration.tags = ['abc', 123] self._assert_validation_error(exploration, 'to be a string') exploration.tags = [''] self._assert_validation_error(exploration, 'Tags should be non-empty') exploration.tags = ['123'] self._assert_validation_error( exploration, 'should only contain lowercase letters and spaces') exploration.tags = ['ABC'] self._assert_validation_error( exploration, 'should only contain lowercase letters and spaces') exploration.tags = [' a b'] self._assert_validation_error( exploration, 'Tags should not start or end with whitespace') exploration.tags = ['a b '] self._assert_validation_error( exploration, 'Tags should not start or end with whitespace') exploration.tags = ['a b'] self._assert_validation_error( exploration, 'Adjacent whitespace in tags should be collapsed') exploration.tags = ['abc', 'abc'] self._assert_validation_error( exploration, 'Some tags duplicate each other') exploration.tags = ['computer science', 'analysis', 'a b c'] exploration.validate() def test_exploration_skin_and_gadget_validation(self): """Test that Explorations including gadgets validate properly.""" exploration = exp_domain.Exploration.from_yaml( 'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS) invalid_gadget_instance = exp_domain.GadgetInstance( 'bad_type', 'aUniqueGadgetName', [], {}) with self.assertRaisesRegexp( utils.ValidationError, 'Unknown 
gadget with type bad_type is not in the registry.' ): invalid_gadget_instance.validate() with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS): gadget_instance = exploration.skin_instance.panel_contents_dict[ 'bottom'][0] # Force a GadgetInstance to require certain state names. gadget_instance.visible_in_states.extend(['DEF', 'GHI']) self._assert_validation_error( exploration, 'Exploration missing required states: DEF, GHI') def_state = exp_domain.State.create_default_state('DEF') def_state.update_interaction_id('TextInput') exploration.states['DEF'] = def_state self._assert_validation_error( exploration, 'Exploration missing required state: GHI') ghi_state = exp_domain.State.create_default_state('GHI') ghi_state.update_interaction_id('TextInput') exploration.states['GHI'] = ghi_state exploration.validate() # Force a gadget name collision. gadget_instance.visible_in_states = ['DEF'] exploration.add_gadget(TEST_GADGET_DICT, 'bottom') exploration.skin_instance.panel_contents_dict[ 'bottom'][1].visible_in_states = ['GHI'] self._assert_validation_error( exploration, 'ATestGadget gadget instance name must be unique.') exploration.skin_instance.panel_contents_dict['bottom'].pop() gadget_instance.visible_in_states.extend(['DEF']) self._assert_validation_error( exploration, 'TestGadget specifies visibility repeatedly for state: DEF') # Remove duplicate state. gadget_instance.visible_in_states.pop() # Adding a panel that doesn't exist in the skin. 
        exploration.skin_instance.panel_contents_dict[
            'non_existent_panel'] = []
        self._assert_validation_error(
            exploration,
            'The panel name \'non_existent_panel\' is invalid.')

    def test_gadget_name_validation(self):
        """Test that gadget naming conditions validate properly.

        Covers empty names, non-string names, over-long names, and names
        containing non-alphanumeric characters (punctuation, tab, newline).
        """
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = exploration.skin_instance.panel_contents_dict[
                'bottom'][0]
            gadget_instance.validate()

            gadget_instance.name = ''
            self._assert_validation_error(
                gadget_instance, 'Gadget name must not be an empty string.')

            gadget_instance.name = 0
            self._assert_validation_error(
                gadget_instance,
                'Gadget name must be a string. Received type: int')

            gadget_instance.name = 'ASuperLongGadgetNameThatExceedsTheLimit'
            max_length = exp_domain.GadgetInstance._MAX_GADGET_NAME_LENGTH  # pylint: disable=protected-access
            self._assert_validation_error(
                gadget_instance,
                'ASuperLongGadgetNameThatExceedsTheLimit gadget name'
                ' exceeds maximum length of %d' % max_length)

            gadget_instance.name = 'VERYGADGET!'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: VERYGADGET!')

            gadget_instance.name = 'Name with \t tab'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with \t tab')

            gadget_instance.name = 'Name with \n newline'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with \n newline')

            # NOTE(review): the internal spacing of the next two literals may
            # have been collapsed in transit; the apparent intent is multiple
            # consecutive (resp. leading/trailing) spaces, since a name with
            # single spaces validates below -- confirm against the validator.
            gadget_instance.name = 'Name with 3 space'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with 3 space')

            gadget_instance.name = ' untrim whitespace '
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: untrim whitespace ')

            # Names with spaces and number should pass.
            gadget_instance.name = 'Space and 1'
            gadget_instance.validate()

    def test_exploration_get_gadget_types(self):
        """Test that Exploration.get_gadget_types returns apt results."""
        exploration_without_gadgets = exp_domain.Exploration.from_yaml(
            'An Exploration ID', SAMPLE_YAML_CONTENT)
        self.assertEqual(exploration_without_gadgets.get_gadget_types(), [])

        exploration_with_gadgets = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        self.assertEqual(
            exploration_with_gadgets.get_gadget_types(), ['TestGadget'])

        another_gadget = exp_domain.GadgetInstance(
            'AnotherGadget', 'GadgetUniqueName1', [], {}
        )
        exploration_with_gadgets.skin_instance.panel_contents_dict[
            'bottom'].append(another_gadget)
        # The expected order below is alphabetical, not insertion order.
        self.assertEqual(
            exploration_with_gadgets.get_gadget_types(),
            ['AnotherGadget', 'TestGadget']
        )

    def test_title_category_and_objective_validation(self):
        """Test that titles, categories and objectives are validated only
        in 'strict' mode.
        """
        self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration = exp_services.get_exploration_by_id('exp_id')
        # Non-strict validation tolerates empty title/category/objective.
        exploration.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'title must be specified'
            ):
            exploration.validate(strict=True)
        exploration.title = 'A title'

        with self.assertRaisesRegexp(
            utils.ValidationError, 'category must be specified'
            ):
            exploration.validate(strict=True)
        exploration.category = 'A category'

        with self.assertRaisesRegexp(
            utils.ValidationError, 'objective must be specified'
            ):
            exploration.validate(strict=True)
        exploration.objective = 'An objective'

        # With all three fields populated, strict validation passes.
        exploration.validate(strict=True)

    def test_audio_translation_validation(self):
        """Test validation of audio translations."""
        audio_translation = exp_domain.AudioTranslation('a.mp3', 20, True)
        audio_translation.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected audio filename to be a string'
            ):
            with self.swap(audio_translation, 'filename', 20):
                audio_translation.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid audio filename'
            ):
            with self.swap(audio_translation, 'filename', '.invalidext'):
                audio_translation.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid audio filename'
            ):
            with self.swap(audio_translation, 'filename', 'justanextension'):
                audio_translation.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid audio filename'
            ):
            with self.swap(audio_translation, 'filename', 'a.invalidext'):
                audio_translation.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected file size to be an int'
            ):
            with self.swap(audio_translation, 'file_size_bytes', 'abc'):
                audio_translation.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid file size'
            ):
            with self.swap(audio_translation, 'file_size_bytes', -3):
                audio_translation.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected needs_update to be a bool'
            ):
            with self.swap(audio_translation, 'needs_update', 'hello'):
                audio_translation.validate()

    def test_subtitled_html_validation(self):
        """Test validation of subtitled HTML."""
        audio_translation = exp_domain.AudioTranslation(
            'a.mp3', 20, True)
        subtitled_html = exp_domain.SubtitledHtml('some html', {
            'hi-en': audio_translation,
        })
        subtitled_html.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Invalid content HTML'
            ):
            with self.swap(subtitled_html, 'html', 20):
                subtitled_html.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected audio_translations to be a dict'
            ):
            with self.swap(subtitled_html, 'audio_translations', 'not_dict'):
                subtitled_html.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Expected language code to be a string'
            ):
            with self.swap(subtitled_html, 'audio_translations',
                           {20: audio_translation}):
                subtitled_html.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'Unrecognized language code'
            ):
            with self.swap(subtitled_html, 'audio_translations',
                           {'invalid-code': audio_translation}):
                subtitled_html.validate()

    def test_is_demo_property(self):
        """Test the is_demo property."""
        # Purely-numeric short ids are treated as demo explorations.
        demo = exp_domain.Exploration.create_default_exploration('0')
        self.assertEqual(demo.is_demo, True)

        notdemo1 = exp_domain.Exploration.create_default_exploration('a')
        self.assertEqual(notdemo1.is_demo, False)

        notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
        self.assertEqual(notdemo2.is_demo, False)

    def test_exploration_export_import(self):
        """Test that to_dict and from_dict preserve all data within an
        exploration.
        """
        demo = exp_domain.Exploration.create_default_exploration('0')
        demo_dict = demo.to_dict()
        exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
        self.assertEqual(exp_from_dict.to_dict(), demo_dict)

    def test_interaction_with_none_id_is_not_terminal(self):
        """Test that an interaction with an id of None leads to is_terminal
        being false.
        """
        # Default exploration has a default interaction with an ID of None.
        demo = exp_domain.Exploration.create_default_exploration('0')
        init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
        self.assertFalse(init_state.interaction.is_terminal)


class StateExportUnitTests(test_utils.GenericTestBase):
    """Test export of states."""

    def test_export_state_to_dict(self):
        """Test exporting a state to a dict."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['New state'])

        state_dict = exploration.states['New state'].to_dict()
        # The expected serialized form of a freshly-added state: no
        # interaction chosen yet (id None), empty content, and a default
        # outcome that loops back to the state itself.
        expected_dict = {
            'classifier_model_id': None,
            'content': {
                'html': '',
                'audio_translations': {}
            },
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': 'New state',
                    'feedback': [],
                    'param_changes': [],
                },
                'fallbacks': [],
                'hints': [],
                'id': None,
                'solution': {},
            },
            'param_changes': [],
        }
        self.assertEqual(expected_dict, state_dict)


class YamlCreationUnitTests(test_utils.GenericTestBase):
    """Test creation of explorations from YAML files."""

    EXP_ID = 'An exploration_id'

    def test_yaml_import_and_export(self):
        """Test the from_yaml() and to_yaml() methods."""
        exploration = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='Title', category='Category')
        exploration.add_states(['New state'])
        self.assertEqual(len(exploration.states), 2)

        exploration.states['New state'].update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': {
                'dest': 'New state',
                'feedback': [],
                'param_changes': [],
            },
        }])

        exploration.validate()

        yaml_content = exploration.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)

        # Round-trip: re-importing the exported YAML must reproduce it.
        exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
        self.assertEqual(len(exploration2.states), 2)
        yaml_content_2 = exploration2.to_yaml()
        self.assertEqual(yaml_content_2, yaml_content)

        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')

        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'Invalid\ninit_state_name:\nMore stuff')

        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'State1:\n(\nInvalid yaml')

        # from_yaml() only accepts schema versions >= 10; older snapshots
        # must go through from_untitled_yaml(), and vice versa.
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version >= 10, received: 9'
            ):
            exp_domain.Exploration.from_yaml(
                'exp4', SAMPLE_UNTITLED_YAML_CONTENT)

        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version <= 9'
            ):
            exp_domain.Exploration.from_untitled_yaml(
                'exp4', 'Title', 'Category', SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_without_gadgets(self):
        """Test from_yaml() and to_yaml() methods without gadgets."""
        exploration_without_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT)
        yaml_content = exploration_without_gadgets.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_with_gadgets(self):
        """Test from_yaml() and to_yaml() methods including gadgets."""
        exploration_with_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT_WITH_GADGETS)

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            generated_yaml = exploration_with_gadgets.to_yaml()

        # Compare as parsed dicts so key ordering in the YAML text does not
        # matter.
        generated_yaml_as_dict = utils.dict_from_yaml(generated_yaml)
        sample_yaml_as_dict = utils.dict_from_yaml(
            SAMPLE_YAML_CONTENT_WITH_GADGETS)
        self.assertEqual(generated_yaml_as_dict, sample_yaml_as_dict)


class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
    """Tests the presence of appropriate schema migration methods in the
    Exploration domain object class.
""" def test_correct_states_schema_conversion_methods_exist(self): """Test that the right states schema conversion methods exist.""" current_states_schema_version = ( feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION) for version_num in range(current_states_schema_version): self.assertTrue(hasattr( exp_domain.Exploration, '_convert_states_v%s_dict_to_v%s_dict' % ( version_num, version_num + 1))) self.assertFalse(hasattr( exp_domain.Exploration, '_convert_states_v%s_dict_to_v%s_dict' % ( current_states_schema_version, current_states_schema_version + 1))) def test_correct_exploration_schema_conversion_methods_exist(self): """Test that the right exploration schema conversion methods exist.""" current_exp_schema_version = ( exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION) for version_num in range(1, current_exp_schema_version): self.assertTrue(hasattr( exp_domain.Exploration, '_convert_v%s_dict_to_v%s_dict' % ( version_num, version_num + 1))) self.assertFalse(hasattr( exp_domain.Exploration, '_convert_v%s_dict_to_v%s_dict' % ( current_exp_schema_version, current_exp_schema_version + 1))) class SchemaMigrationUnitTests(test_utils.GenericTestBase): """Test migration methods for yaml content.""" YAML_CONTENT_V1 = ("""default_skin: conversation_v1 param_changes: [] param_specs: {} schema_version: 1 states: - content: - type: text value: '' name: (untitled state) param_changes: [] widget: customization_args: {} handlers: - name: submit rule_specs: - definition: inputs: x: InputString name: Equals rule_type: atomic dest: END feedback: - Correct! 
param_changes: [] - definition: rule_type: default dest: (untitled state) feedback: [] param_changes: [] sticky: false widget_id: TextInput - content: - type: text value: '' name: New state param_changes: [] widget: customization_args: {} handlers: - name: submit rule_specs: - definition: rule_type: default dest: END feedback: [] param_changes: [] sticky: false widget_id: TextInput """) YAML_CONTENT_V2 = ("""default_skin: conversation_v1 init_state_name: (untitled state) param_changes: [] param_specs: {} schema_version: 2 states: (untitled state): content: - type: text value: '' param_changes: [] widget: customization_args: {} handlers: - name: submit rule_specs: - definition: inputs: x: InputString name: Equals rule_type: atomic dest: END feedback: - Correct! param_changes: [] - definition: rule_type: default dest: (untitled state) feedback: [] param_changes: [] sticky: false widget_id: TextInput New state: content: - type: text value: '' param_changes: [] widget: customization_args: {} handlers: - name: submit rule_specs: - definition: rule_type: default dest: END feedback: [] param_changes: [] sticky: false widget_id: TextInput """) YAML_CONTENT_V3 = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 3 skill_tags: [] states: (untitled state): content: - type: text value: '' param_changes: [] widget: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: inputs: x: InputString name: Equals rule_type: atomic dest: END feedback: - Correct! 
param_changes: [] - definition: rule_type: default dest: (untitled state) feedback: [] param_changes: [] sticky: false widget_id: TextInput New state: content: - type: text value: '' param_changes: [] widget: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: rule_type: default dest: END feedback: [] param_changes: [] sticky: false widget_id: TextInput """) YAML_CONTENT_V4 = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 4 skill_tags: [] states: (untitled state): content: - type: text value: '' interaction: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: inputs: x: InputString name: Equals rule_type: atomic dest: END feedback: - Correct! param_changes: [] - definition: rule_type: default dest: (untitled state) feedback: [] param_changes: [] id: TextInput param_changes: [] New state: content: - type: text value: '' interaction: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: rule_type: default dest: END feedback: [] param_changes: [] id: TextInput param_changes: [] """) YAML_CONTENT_V5 = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 5 skin_customizations: panels_contents: {} states: (untitled state): content: - type: text value: '' interaction: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: inputs: x: InputString name: Equals rule_type: atomic dest: END feedback: - Correct! 
param_changes: [] - definition: rule_type: default dest: (untitled state) feedback: [] param_changes: [] id: TextInput param_changes: [] New state: content: - type: text value: '' interaction: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: rule_type: default dest: END feedback: [] param_changes: [] id: TextInput param_changes: [] tags: [] """) YAML_CONTENT_V6 = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 6 skin_customizations: panels_contents: {} states: (untitled state): content: - type: text value: '' interaction: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: inputs: x: InputString name: Equals rule_type: atomic dest: END feedback: - Correct! param_changes: [] - definition: rule_type: default dest: (untitled state) feedback: [] param_changes: [] id: TextInput triggers: [] param_changes: [] END: content: - type: text value: Congratulations, you have finished! 
interaction: customization_args: recommendedExplorationIds: value: [] handlers: - name: submit rule_specs: - definition: rule_type: default dest: END feedback: [] param_changes: [] id: EndExploration triggers: [] param_changes: [] New state: content: - type: text value: '' interaction: customization_args: placeholder: value: '' rows: value: 1 handlers: - name: submit rule_specs: - definition: rule_type: default dest: END feedback: [] param_changes: [] id: TextInput triggers: [] param_changes: [] states_schema_version: 3 tags: [] """) YAML_CONTENT_V7 = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 7 skin_customizations: panels_contents: {} states: (untitled state): content: - type: text value: '' interaction: answer_groups: - outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] id: TextInput triggers: [] param_changes: [] END: content: - type: text value: Congratulations, you have finished! 
interaction: answer_groups: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null id: EndExploration triggers: [] param_changes: [] New state: content: - type: text value: '' interaction: answer_groups: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] id: TextInput triggers: [] param_changes: [] states_schema_version: 4 tags: [] """) YAML_CONTENT_V8 = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 8 skin_customizations: panels_contents: {} states: (untitled state): content: - type: text value: '' interaction: answer_groups: - outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] END: content: - type: text value: Congratulations, you have finished! interaction: answer_groups: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] id: EndExploration param_changes: [] New state: content: - type: text value: '' interaction: answer_groups: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] states_schema_version: 5 tags: [] """) YAML_CONTENT_V9 = ("""author_notes: '' blurb: '' default_skin: conversation_v1 init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 9 skin_customizations: panels_contents: {} states: (untitled state): content: - type: text value: '' interaction: answer_groups: - outcome: dest: END feedback: - Correct! 
param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] END: content: - type: text value: Congratulations, you have finished! interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] id: EndExploration param_changes: [] New state: content: - type: text value: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] states_schema_version: 6 tags: [] """) YAML_CONTENT_V10 = ("""author_notes: '' blurb: '' category: Category init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 10 skin_customizations: panels_contents: bottom: [] states: (untitled state): content: - type: text value: '' interaction: answer_groups: - outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] END: content: - type: text value: Congratulations, you have finished! 
interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] id: EndExploration param_changes: [] New state: content: - type: text value: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] states_schema_version: 7 tags: [] title: Title """) YAML_CONTENT_V11 = ("""author_notes: '' blurb: '' category: Category init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 11 skin_customizations: panels_contents: bottom: [] states: (untitled state): classifier_model_id: null content: - type: text value: '' interaction: answer_groups: - outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] END: classifier_model_id: null content: - type: text value: Congratulations, you have finished! 
interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] id: EndExploration param_changes: [] New state: classifier_model_id: null content: - type: text value: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] states_schema_version: 8 tags: [] title: Title """) YAML_CONTENT_V12 = ("""author_notes: '' blurb: '' category: Category init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 12 skin_customizations: panels_contents: bottom: [] states: (untitled state): classifier_model_id: null content: - type: text value: '' interaction: answer_groups: - correct: false outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] END: classifier_model_id: null content: - type: text value: Congratulations, you have finished! 
interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] id: EndExploration param_changes: [] New state: classifier_model_id: null content: - type: text value: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] id: TextInput param_changes: [] states_schema_version: 9 tags: [] title: Title """) YAML_CONTENT_V13 = ("""author_notes: '' blurb: '' category: Category init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 13 skin_customizations: panels_contents: bottom: [] states: (untitled state): classifier_model_id: null content: - type: text value: '' interaction: answer_groups: - correct: false outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] END: classifier_model_id: null content: - type: text value: Congratulations, you have finished! 
interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] hints: [] id: EndExploration solution: {} param_changes: [] New state: classifier_model_id: null content: - type: text value: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] states_schema_version: 10 tags: [] title: Title """) YAML_CONTENT_V14 = ("""author_notes: '' blurb: '' category: Category init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 14 skin_customizations: panels_contents: bottom: [] states: (untitled state): classifier_model_id: null content: audio_translations: [] html: '' interaction: answer_groups: - correct: false outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] END: classifier_model_id: null content: audio_translations: [] html: Congratulations, you have finished! 
interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] hints: [] id: EndExploration solution: {} param_changes: [] New state: classifier_model_id: null content: audio_translations: [] html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] states_schema_version: 11 tags: [] title: Title """) YAML_CONTENT_V15 = ("""author_notes: '' blurb: '' category: Category init_state_name: (untitled state) language_code: en objective: '' param_changes: [] param_specs: {} schema_version: 15 skin_customizations: panels_contents: bottom: [] states: (untitled state): classifier_model_id: null content: audio_translations: {} html: '' interaction: answer_groups: - correct: false outcome: dest: END feedback: - Correct! param_changes: [] rule_specs: - inputs: x: InputString rule_type: Equals confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: (untitled state) feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] END: classifier_model_id: null content: audio_translations: {} html: Congratulations, you have finished! 
interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: recommendedExplorationIds: value: [] default_outcome: null fallbacks: [] hints: [] id: EndExploration solution: {} param_changes: [] New state: classifier_model_id: null content: audio_translations: {} html: '' interaction: answer_groups: [] confirmed_unclassified_answers: [] customization_args: placeholder: value: '' rows: value: 1 default_outcome: dest: END feedback: [] param_changes: [] fallbacks: [] hints: [] id: TextInput solution: {} param_changes: [] states_schema_version: 12 tags: [] title: Title """) _LATEST_YAML_CONTENT = YAML_CONTENT_V15 def test_load_from_v1(self): """Test direct loading from a v1 yaml file.""" exploration = exp_domain.Exploration.from_untitled_yaml( 'eid', 'Title', 'Category', self.YAML_CONTENT_V1) self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT) def test_load_from_v2(self): """Test direct loading from a v2 yaml file.""" exploration = exp_domain.Exploration.from_untitled_yaml( 'eid', 'Title', 'Category', self.YAML_CONTENT_V2) self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT) def test_load_from_v3(self): """Test direct loading from a v3 yaml file.""" exploration = exp_domain.Exploration.from_untitled_yaml( 'eid', 'Title', 'Category', self.YAML_CONTENT_V3) self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT) def test_load_from_v4(self): """Test direct loading from a v4 yaml file.""" exploration = exp_domain.Exploration.from_untitled_yaml( 'eid', 'Title', 'Category', self.YAML_CONTENT_V4) self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT) def test_load_from_v5(self): """Test direct loading from a v5 yaml file.""" exploration = exp_domain.Exploration.from_untitled_yaml( 'eid', 'Title', 'Category', self.YAML_CONTENT_V5) self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT) def test_load_from_v6(self): """Test direct loading from a v6 yaml file.""" exploration = 
exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v7(self):
        """Test direct loading from a v7 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v8(self):
        """Test direct loading from a v8 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v9(self):
        """Test direct loading from a v9 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    # From schema v10 onwards the title/category live inside the YAML, so
    # the remaining tests use from_yaml() instead of from_untitled_yaml().
    def test_load_from_v10(self):
        """Test direct loading from a v10 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V10)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v11(self):
        """Test direct loading from a v11 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V11)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v12(self):
        """Test direct loading from a v12 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V12)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v13(self):
        """Test direct loading from a v13 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V13)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v14(self):
        """Test direct loading from a v14 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V14)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v15(self):
        """Test direct loading from a v15 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V15)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)


class ConversionUnitTests(test_utils.GenericTestBase):
    """Test conversion methods."""

    def test_convert_exploration_to_player_dict(self):
        """Test that to_player_dict() emits the expected player payload."""
        exp_title = 'Title'
        second_state_name = 'first state'

        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', title=exp_title, category='Category')
        exploration.add_states([second_state_name])

        def _get_default_state_dict(content_str, dest_name):
            # Helper: the expected player-dict form of a freshly-created
            # state with the given content and default destination.
            return {
                'classifier_model_id': None,
                'content': {
                    'audio_translations': {},
                    'html': content_str,
                },
                'interaction': {
                    'answer_groups': [],
                    'confirmed_unclassified_answers': [],
                    'customization_args': {},
                    'default_outcome': {
                        'dest': dest_name,
                        'feedback': [],
                        'param_changes': [],
                    },
                    'fallbacks': [],
                    'hints': [],
                    'id': None,
                    'solution': {},
                },
                'param_changes': [],
            }

        self.assertEqual(exploration.to_player_dict(), {
            'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'title': exp_title,
            'states': {
                feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
                    feconf.DEFAULT_INIT_STATE_CONTENT_STR,
                    feconf.DEFAULT_INIT_STATE_NAME),
                second_state_name: _get_default_state_dict(
                    '', second_state_name),
            },
            'param_changes': [],
            'param_specs': {},
            'skin_customizations': (
                exp_domain.SkinInstance._get_default_skin_customizations()  # pylint: disable=protected-access
            ),
            'language_code': 'en',
        })


class StateOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on states."""

    def test_can_undergo_classification(self):
        """Test the can_undergo_classification() function."""
        exploration_id = 'eid'
        test_exp_filepath = os.path.join(
            feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
        yaml_content = utils.get_file_contents(test_exp_filepath)
        assets_list = []
        exp_services.save_new_exploration_from_yaml_and_assets(
            feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
            assets_list)

        exploration = exp_services.get_exploration_by_id(exploration_id)
        state_with_training_data = exploration.states['Home']
        state_without_training_data = exploration.states['End']

        # A state with 786 training examples.
        self.assertTrue(
            state_with_training_data.can_undergo_classification())

        # A state with no training examples.
        self.assertFalse(
            state_without_training_data.can_undergo_classification())

    def test_get_training_data(self):
        """Test retrieval of training data."""
        exploration_id = 'eid'
        test_exp_filepath = os.path.join(
            feconf.SAMPLE_EXPLORATIONS_DIR,
            'classifier_demo_exploration.yaml')
        yaml_content = utils.get_file_contents(test_exp_filepath)
        assets_list = []
        exp_services.save_new_exploration_from_yaml_and_assets(
            feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
            assets_list)

        exploration = exp_services.get_exploration_by_id(exploration_id)
        state = exploration.states['text']

        expected_training_data = [{
            'answer_group_index': 1,
            'answers': [u'cheerful', u'merry', u'ecstatic', u'glad',
                        u'overjoyed', u'pleased', u'thrilled', u'smile']}]

        observed_training_data = state.get_training_data()

        self.assertEqual(observed_training_data, expected_training_data)

    def test_delete_state(self):
        """Test deletion of states."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.add_states(['first state'])

        # The initial state can never be deleted.
        with self.assertRaisesRegexp(
            ValueError, 'Cannot delete initial state'
            ):
            exploration.delete_state(exploration.init_state_name)

        exploration.add_states(['second state'])
        exploration.delete_state('second state')

        with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
            exploration.delete_state('fake state')

    def test_state_operations(self):
        """Test adding, updating and checking existence of states."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        self.assertNotIn('invalid_state_name', exploration.states)
        self.assertEqual(len(exploration.states), 1)

        default_state_name = exploration.init_state_name
        exploration.rename_state(default_state_name, 'Renamed state')
        self.assertEqual(len(exploration.states), 1)
        self.assertEqual(exploration.init_state_name, 'Renamed state')

        # Add a new state.
        exploration.add_states(['State 2'])
        self.assertEqual(len(exploration.states), 2)

        # It is OK to rename a state to the same name.
        exploration.rename_state('State 2', 'State 2')

        # But it is not OK to add or rename a state using a name that already
        # exists.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.add_states(['State 2'])
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'Renamed state')

        # And it is OK to rename a state to 'END' (old terminal pseudostate).
        # It is tested throughout this test because a lot of old behavior
        # used to be specific to states named 'END'. These tests validate
        # that is no longer the situation.
        exploration.rename_state('State 2', 'END')

        # Should successfully be able to name it back.
        exploration.rename_state('END', 'State 2')

        # The exploration now has exactly two states.
self.assertNotIn(default_state_name, exploration.states) self.assertIn('Renamed state', exploration.states) self.assertIn('State 2', exploration.states) # Can successfully add 'END' state exploration.add_states(['END']) # Should fail to rename like any other state with self.assertRaisesRegexp(ValueError, 'Duplicate state name'): exploration.rename_state('State 2', 'END') # Ensure the other states are connected to END exploration.states[ 'Renamed state'].interaction.default_outcome.dest = 'State 2' exploration.states['State 2'].interaction.default_outcome.dest = 'END' # Ensure the other states have interactions exploration.states['Renamed state'].update_interaction_id('TextInput') exploration.states['State 2'].update_interaction_id('TextInput') # Other miscellaneous requirements for validation exploration.title = 'Title' exploration.category = 'Category' exploration.objective = 'Objective' # The exploration should NOT be terminable even though it has a state # called 'END' and everything else is connected to it. with self.assertRaises(Exception): exploration.validate(strict=True) # Renaming the node to something other than 'END' and giving it an # EndExploration is enough to validate it, though it cannot have a # default outcome or answer groups. 
exploration.rename_state('END', 'AnotherEnd') another_end_state = exploration.states['AnotherEnd'] another_end_state.update_interaction_id('EndExploration') another_end_state.interaction.default_outcome = None exploration.validate(strict=True) # Name it back for final tests exploration.rename_state('AnotherEnd', 'END') # Should be able to successfully delete it exploration.delete_state('END') self.assertNotIn('END', exploration.states) class GadgetOperationsUnitTests(test_utils.GenericTestBase): """Test methods operating on gadgets.""" def test_gadget_operations(self): """Test deletion of gadgets.""" exploration = exp_domain.Exploration.create_default_exploration('eid') with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS): exploration.add_gadget(TEST_GADGET_DICT, 'bottom') self.assertEqual(exploration.skin_instance.panel_contents_dict[ 'bottom'][0].type, TEST_GADGET_DICT['gadget_type']) self.assertEqual(exploration.skin_instance.panel_contents_dict[ 'bottom'][0].name, TEST_GADGET_DICT['gadget_name']) with self.assertRaisesRegexp( ValueError, 'Gadget NotARealGadget does not exist.' ): exploration.rename_gadget('NotARealGadget', 'ANewName') exploration.rename_gadget( TEST_GADGET_DICT['gadget_name'], 'ANewName') self.assertEqual(exploration.skin_instance.panel_contents_dict[ 'bottom'][0].name, 'ANewName') # Add another gadget. 
with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS): exploration.add_gadget(TEST_GADGET_DICT, 'bottom') self.assertEqual( exploration.get_all_gadget_names(), ['ANewName', 'ATestGadget'] ) with self.assertRaisesRegexp( ValueError, 'Duplicate gadget name: ANewName' ): exploration.rename_gadget('ATestGadget', 'ANewName') gadget_instance = exploration.get_gadget_instance_by_name( 'ANewName') self.assertIs( exploration.skin_instance.panel_contents_dict['bottom'][0], gadget_instance ) panel = exploration._get_panel_for_gadget('ANewName') # pylint: disable=protected-access self.assertEqual(panel, 'bottom') exploration.delete_gadget('ANewName') exploration.delete_gadget('ATestGadget') self.assertEqual(exploration.skin_instance.panel_contents_dict[ 'bottom'], []) with self.assertRaisesRegexp( ValueError, 'Gadget ANewName does not exist.' ): exploration.delete_gadget('ANewName') class SkinInstanceUnitTests(test_utils.GenericTestBase): """Test methods for SkinInstance.""" _SAMPLE_SKIN_INSTANCE_DICT = { 'skin_id': 'conversation_v1', 'skin_customizations': { 'panels_contents': { 'bottom': [ { 'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS, 'gadget_type': 'TestGadget', 'gadget_name': 'ATestGadget', 'visible_in_states': ['New state', 'Second state'] } ] } } } def test_get_state_names_required_by_gadgets(self): """Test accurate computation of state_names_required_by_gadgets.""" skin_instance = exp_domain.SkinInstance( 'conversation_v1', self._SAMPLE_SKIN_INSTANCE_DICT['skin_customizations']) self.assertEqual( skin_instance.get_state_names_required_by_gadgets(), ['New state', 'Second state']) def test_generation_of_get_default_skin_customizations(self): """Tests that default skin customizations are created properly.""" skin_instance = exp_domain.SkinInstance(feconf.DEFAULT_SKIN_ID, None) self.assertEqual( skin_instance.panel_contents_dict, {'bottom': []} ) def test_conversion_of_skin_to_and_from_dict(self): """Tests conversion of SkinInstance to and from dict 
representations.""" exploration = exp_domain.Exploration.from_yaml( 'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS) skin_instance = exploration.skin_instance skin_instance_as_dict = skin_instance.to_dict() self.assertEqual( skin_instance_as_dict, self._SAMPLE_SKIN_INSTANCE_DICT) skin_instance_as_instance = exp_domain.SkinInstance.from_dict( skin_instance_as_dict) self.assertEqual(skin_instance_as_instance.skin_id, 'conversation_v1') self.assertEqual( sorted(skin_instance_as_instance.panel_contents_dict.keys()), ['bottom']) class GadgetInstanceUnitTests(test_utils.GenericTestBase): """Tests methods instantiating and validating GadgetInstances.""" def test_gadget_instantiation(self): """Test instantiation of GadgetInstances.""" exploration = exp_domain.Exploration.from_yaml( 'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS) self.assertEqual(len(exploration.skin_instance.panel_contents_dict[ 'bottom']), 1) def test_gadget_instance_properties(self): """Test accurate representation of gadget properties.""" exploration = exp_domain.Exploration.from_yaml( 'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS) panel_contents_dict = exploration.skin_instance.panel_contents_dict with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS): test_gadget_instance = panel_contents_dict['bottom'][0] self.assertEqual(test_gadget_instance.height, 50) self.assertEqual(test_gadget_instance.width, 60) self.assertIn('New state', test_gadget_instance.visible_in_states) def test_gadget_instance_validation(self): """Test validation of GadgetInstance.""" exploration = exp_domain.Exploration.from_yaml( 'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS) panel_contents_dict = exploration.skin_instance.panel_contents_dict with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS): test_gadget_instance = panel_contents_dict['bottom'][0] # Validation against sample YAML should pass without error. exploration.validate() # Assert size exceeded error triggers when a gadget's size exceeds # a panel's capacity. 
with self.swap( test_gadget_instance.gadget, 'width_px', 4600): self._assert_validation_error( exploration, 'Width 4600 of panel \'bottom\' exceeds limit of 350') # Assert internal validation against CustomizationArgSpecs. test_gadget_instance.customization_args[ 'adviceObjects']['value'].extend( [ {'adviceTitle': 'test_title', 'adviceHtml': 'test html'}, {'adviceTitle': 'another_title', 'adviceHtml': 'more html'}, {'adviceTitle': 'third_title', 'adviceHtml': 'third html'} ] ) with self.assertRaisesRegexp( utils.ValidationError, 'TestGadget is limited to 3 tips, found 4.' ): test_gadget_instance.validate() test_gadget_instance.customization_args[ 'adviceObjects']['value'].pop() # Assert that too many gadgets in a panel raise a ValidationError. panel_contents_dict['bottom'].append(test_gadget_instance) with self.assertRaisesRegexp( utils.ValidationError, '\'bottom\' panel expected at most 1 gadget, but 2 gadgets are ' 'visible in state \'New state\'.' ): exploration.validate() # Assert that an error is raised when a gadget is not visible in any # states. test_gadget_instance.visible_in_states = [] with self.assertRaisesRegexp( utils.ValidationError, 'TestGadget gadget not visible in any states.'): test_gadget_instance.validate() def test_conversion_of_gadget_instance_to_and_from_dict(self): """Test conversion of GadgetInstance to and from dict. 
""" exploration = exp_domain.Exploration.from_yaml( 'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS) panel_contents_dict = exploration.skin_instance.panel_contents_dict test_gadget_instance = panel_contents_dict['bottom'][0] test_gadget_as_dict = test_gadget_instance.to_dict() self.assertEqual( test_gadget_as_dict, { 'gadget_type': 'TestGadget', 'gadget_name': 'ATestGadget', 'visible_in_states': ['New state', 'Second state'], 'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS } ) test_gadget_as_instance = exp_domain.GadgetInstance.from_dict( test_gadget_as_dict) self.assertEqual(test_gadget_as_instance.width, 60) self.assertEqual(test_gadget_as_instance.height, 50) class GadgetVisibilityInStatesUnitTests(test_utils.GenericTestBase): """Tests methods affecting gadget visibility in states.""" def test_retrieving_affected_gadgets(self): """Test that appropriate gadgets are retrieved.""" exploration = exp_domain.Exploration.from_yaml( 'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS) affected_gadget_instances = ( exploration._get_gadget_instances_visible_in_state('Second state')) # pylint: disable=protected-access self.assertEqual(len(affected_gadget_instances), 1) self.assertEqual(affected_gadget_instances[0].name, 'ATestGadget')
{ "content_hash": "67e5dbb0bdb83aebf43c0def5fef9a48", "timestamp": "", "source": "github", "line_count": 2845, "max_line_length": 115, "avg_line_length": 30.85202108963093, "alnum_prop": 0.5767995078269191, "repo_name": "shaz13/oppia", "id": "aa2ecf78dfedb16400fecc29dbad18d3390003b6", "size": "88397", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "core/domain/exp_domain_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "102650" }, { "name": "HTML", "bytes": "944588" }, { "name": "JavaScript", "bytes": "2788895" }, { "name": "Python", "bytes": "3656185" }, { "name": "Shell", "bytes": "46842" } ], "symlink_target": "" }
"""Functional tests for segment reduction ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf class SegmentReductionHelper(tf.test.TestCase): def _input(self, input_shape, dtype=tf.int32): num_elem = 1 for x in input_shape: num_elem *= x values = np.arange(1, num_elem + 1) np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype) return tf.constant(values, shape=input_shape, dtype=dtype), np_values def _segmentReduce(self, indices, x, op1, op2=None, num_out_rows=None): if not x.size: return np.array([]) indices = np.asarray(indices) if num_out_rows is None: num_out_rows = indices[-1] + 1 output = [None] * num_out_rows slice_shape = x.shape[indices.ndim:] x_flat = x.reshape((indices.size,) + slice_shape) for i, index in enumerate(indices.ravel()): if output[index] is not None: output[index] = op1(output[index], x_flat[i]) else: output[index] = x_flat[i] # zero initialize values that are still uncalcuated. 
output = [o if o is not None else np.zeros(slice_shape) for o in output] if op2 is not None: output = [op2(o) for o in output] output = [o.reshape(slice_shape) for o in output] return np.array(output) def _assertAllClose(self, indices, np_x, tf_x): for i in set(np.asarray(indices).ravel()): self.assertAllClose(np_x[i], tf_x[i]) def _mean_cum_op(self, x, y): return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2) def _mean_reduce_op(self, x): return x[0] / x[1] if isinstance(x, tuple) else x class SegmentReductionOpTest(SegmentReductionHelper): def testValues(self): dtypes = [tf.float32, tf.float64, tf.int64, tf.int32] # Each item is np_op1, np_op2, tf_op ops_list = [(np.add, None, tf.segment_sum), (self._mean_cum_op, self._mean_reduce_op, tf.segment_mean), (np.ndarray.__mul__, None, tf.segment_prod), (np.minimum, None, tf.segment_min), (np.maximum, None, tf.segment_max)] n = 10 shape = [n, 2] indices = [i // 3 for i in range(n)] for dtype in dtypes: with self.test_session(use_gpu=False): tf_x, np_x = self._input(shape, dtype=dtype) for np_op1, np_op2, tf_op in ops_list: np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2) s = tf_op(data=tf_x, segment_ids=indices) tf_ans = s.eval() self._assertAllClose(indices, np_ans, tf_ans) # NOTE(mrry): The static shape inference that computes # `tf_ans.shape` can only infer that sizes from dimension 1 # onwards, because the size of dimension 0 is data-dependent # and may therefore vary dynamically. 
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:]) def testSegmentIdsShape(self): shape = [4, 4] tf_x, _ = self._input(shape) indices = tf.constant([0, 1, 2, 2], shape=[2, 2]) with self.assertRaises(ValueError): tf.segment_sum(data=tf_x, segment_ids=indices) def testSegmentIdsSize(self): shape = [4, 4] with self.test_session(): tf_x, _ = self._input(shape) indices = [0, 1] s = tf.segment_sum(data=tf_x, segment_ids=indices) with self.assertRaisesOpError("segment_ids should be the same size"): s.eval() def testSegmentIdsValid(self): # This is a baseline for the following SegmentIdsInvalid* tests. shape = [4, 4] with self.test_session(): tf_x, _ = self._input(shape) indices = [0, 0, 0, 1] result = tf.segment_sum(data=tf_x, segment_ids=indices).eval() self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result) def testSegmentIdsInvalid1(self): shape = [4, 4] with self.test_session(): tf_x, _ = self._input(shape) indices = [-1, -1, 0, 0] s = tf.segment_sum(data=tf_x, segment_ids=indices) with self.assertRaisesOpError("segment ids do not start at 0"): s.eval() def testSegmentIdsInvalid2(self): shape = [4, 4] with self.test_session(): tf_x, _ = self._input(shape) indices = [1, 1, 2, 2] s = tf.segment_sum(data=tf_x, segment_ids=indices) with self.assertRaisesOpError("segment ids do not start at 0"): s.eval() def testSegmentIdsInvalid3(self): shape = [4, 4] with self.test_session(): tf_x, _ = self._input(shape) indices = [0, 0, 2, 2] s = tf.segment_sum(data=tf_x, segment_ids=indices) with self.assertRaisesOpError("segment ids are not increasing by 1"): s.eval() def testSegmentIdsInvalid4(self): shape = [4, 4] with self.test_session(): tf_x, _ = self._input(shape) indices = [0, 1, 0, 1] s = tf.segment_sum(data=tf_x, segment_ids=indices) with self.assertRaisesOpError("segment ids are not increasing by 1"): s.eval() def testSegmentIdsInvalid5(self): shape = [4, 4] with self.test_session(): tf_x, _ = self._input(shape) indices = [0, 1, 2, 0] s = 
tf.segment_sum(data=tf_x, segment_ids=indices) with self.assertRaisesOpError( r"Segment id 1 out of range \[0, 1\), probably " "because 'segment_ids' input is not sorted."): s.eval() def testGradient(self): shape = [4, 4] indices = [0, 1, 2, 2] for tf_op in [tf.segment_sum, tf.segment_mean, tf.segment_min, tf.segment_max]: with self.test_session(): tf_x, np_x = self._input(shape, dtype=tf.float64) s = tf_op(data=tf_x, segment_ids=indices) jacob_t, jacob_n = tf.test.compute_gradient( tf_x, shape, s, [3, 4], x_init_value=np_x.astype(np.double), delta=1) self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3) class UnsortedSegmentSumTest(SegmentReductionHelper): def testValues(self): dtypes = [tf.float32, tf.float64, tf.int64, tf.int32] indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3]) num_segments = 12 for indices in indices_flat, indices_flat.reshape(5, 2): shape = indices.shape + (2,) for dtype in dtypes: with self.test_session(use_gpu=False): tf_x, np_x = self._input(shape, dtype=dtype) np_ans = self._segmentReduce(indices, np_x, np.add, op2=None, num_out_rows=num_segments) s = tf.unsorted_segment_sum(data=tf_x, segment_ids=indices, num_segments=num_segments) tf_ans = s.eval() self._assertAllClose(indices, np_ans, tf_ans) self.assertShapeEqual(np_ans, s) def testGradient(self): num_cols = 2 indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3]) num_segments = max(indices_flat) + 3 for indices in indices_flat, indices_flat.reshape(5, 2): shape = indices.shape + (num_cols,) with self.test_session(): tf_x, np_x = self._input(shape, dtype=tf.float64) s = tf.unsorted_segment_sum(data=tf_x, segment_ids=indices, num_segments=num_segments) jacob_t, jacob_n = tf.test.compute_gradient( tf_x, shape, s, [num_segments, num_cols], x_init_value=np_x.astype(np.double), delta=1) self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3) def testGradientMatchesSegmentSum(self): # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum # and compare the 
outputs, which should be identical. # NB: for this test to work, indices must be valid for SegmentSum, namely # it must be sorted, the indices must be contiguous, and num_segments # must be max(indices) + 1. indices = [0, 0, 1, 1, 1, 2, 3, 4, 5] n = len(indices) num_cols = 2 shape = [n, num_cols] num_segments = max(indices) + 1 with self.test_session(): tf_x, np_x = self._input(shape, dtype=tf.float64) # Results from UnsortedSegmentSum unsorted_s = tf.unsorted_segment_sum(data=tf_x, segment_ids=indices, num_segments=num_segments) (unsorted_jacob_t, unsorted_jacob_n) = tf.test.compute_gradient( tf_x, shape, unsorted_s, [num_segments, num_cols], x_init_value=np_x.astype(np.double), delta=1) # Results from SegmentSum sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices) sorted_jacob_t, sorted_jacob_n = tf.test.compute_gradient( tf_x, shape, sorted_s, [num_segments, num_cols], x_init_value=np_x.astype(np.double), delta=1) self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3) self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3) def testBadIndices(self): with self.test_session(): for bad in [[-1]], [[7]]: unsorted = tf.unsorted_segment_sum([[17]], bad, num_segments=2) with self.assertRaisesOpError( r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]): unsorted.eval() class SparseSegmentReductionHelper(SegmentReductionHelper): def _sparse_input(self, input_shape, num_indices, dtype=tf.int32): a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype) indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32) return (tf.constant(indices, dtype=tf.int32), indices, a, b) def _sparseSegmentReduce(self, x, indices, segment_indices, op1, op2=None): return self._segmentReduce(segment_indices, x[indices], op1, op2) class SparseSegmentReductionOpTest(SparseSegmentReductionHelper): def testValues(self): dtypes = [tf.float32, tf.float64, tf.int64, tf.int32] mean_dtypes = [tf.float32, 
tf.float64] # Each item is np_op1, np_op2, tf_op ops_list = [(np.add, None, tf.sparse_segment_sum), (self._mean_cum_op, self._mean_reduce_op, tf.sparse_segment_mean)] n = 400 shape = [n, 2] segment_indices = [] for i in range(20): for _ in range(i + 1): segment_indices.append(i) num_indices = len(segment_indices) for dtype in dtypes: with self.test_session(use_gpu=False): tf_indices, np_indices, tf_x, np_x = self._sparse_input(shape, num_indices, dtype=dtype) for np_op1, np_op2, tf_op in ops_list: if tf_op == tf.sparse_segment_mean and dtype not in mean_dtypes: continue np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices, np_op1, np_op2) s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) tf_ans = s.eval() self._assertAllClose(segment_indices, np_ans, tf_ans) # NOTE(mrry): The static shape inference that computes # `tf_ans.shape` can only infer that sizes from dimension 1 # onwards, because the size of dimension 0 is data-dependent # and may therefore vary dynamically. self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:]) def testValid(self): # Baseline for the test*Invalid* methods below. 
tf_x, _ = self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [0, 1, 2, 2] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) s.eval() def testIndiciesInvalid1(self): tf_x, _ = self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [0, 1, 2, 2] tf_indices = [8, -1, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError( r"indices\[1\] == -1 out of range \[0, 10\)"): s.eval() def testIndiciesInvalid2(self): tf_x, _ = self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [0, 1, 2, 2] tf_indices = [8, 3, 0, 10] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError( r"indices\[3\] == 10 out of range \[0, 10\)"): s.eval() def testSegmentsInvalid1(self): tf_x, _ = self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [0, 2, 2, 2] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError("segment ids are not increasing by 1"): s.eval() def testSegmentsInvalid2(self): tf_x, _ = self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [0, 1, 0, 1] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError("segment ids are not increasing by 1"): s.eval() def testSegmentsInvalid3(self): tf_x, _ = 
self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [0, 1, 2, 0] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError( r"Segment id 1 out of range \[0, 1\), probably because " "'segment_ids' input is not sorted"): s.eval() def testSegmentsInvalid4(self): tf_x, _ = self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [-1, 0, 1, 1] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError("segment ids do not start at 0"): s.eval() def testSegmentsInvalid5(self): tf_x, _ = self._input([10, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_sum, tf.sparse_segment_mean] segment_indices = [1, 2, 2, 2] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) with self.assertRaisesOpError("segment ids do not start at 0"): s.eval() def testGradient(self): shape = [10, 4] segment_indices = [0, 1, 2, 2] num_indices = len(segment_indices) for tf_op in [tf.sparse_segment_sum, tf.sparse_segment_mean]: with self.test_session(): tf_indices, _, tf_x, np_x = self._sparse_input( shape, num_indices, dtype=tf.float64) s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices) jacob_t, jacob_n = tf.test.compute_gradient( tf_x, shape, s, [3, 4], x_init_value=np_x.astype(np.double), delta=1) self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3) def testGradientValid(self): # Baseline for the testGradient*Invalid* methods below. 
tf_x, _ = self._input([3, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad] segment_indices = [0, 1, 2, 2] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(tf_x, tf_indices, segment_indices, 10) s.eval() def testGradientIndicesInvalid1(self): tf_x, _ = self._input([3, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad] segment_indices = [0, 1, 2, 2] tf_indices = [8, 3, 0, 10] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(tf_x, tf_indices, segment_indices, 10) with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"): s.eval() def testGradientIndicesInvalid2(self): tf_x, _ = self._input([3, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad] segment_indices = [0, 1, 2, 2] tf_indices = [8, 3, -1, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(tf_x, tf_indices, segment_indices, 10) with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"): s.eval() def testGradientSegmentsInvalid1(self): tf_x, _ = self._input([3, 4], dtype=tf.float32) # expecting 3 segments ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad] segment_indices = [0, 1, 1, 1] # 2 segments tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(tf_x, tf_indices, segment_indices, 10) with self.assertRaisesOpError("Invalid number of segments"): s.eval() def testGradientSegmentsInvalid2(self): tf_x, _ = self._input([1, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad] segment_indices = [0, 1, 2, 0] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(tf_x, tf_indices, segment_indices, 10) with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"): s.eval() def testGradientSegmentsInvalid3(self): tf_x, _ = 
self._input([2, 4], dtype=tf.float32) ops_list = [tf.sparse_segment_mean_grad, tf.sparse_segment_sqrt_n_grad] segment_indices = [-1, 0, 1, 1] tf_indices = [8, 3, 0, 9] with self.test_session(use_gpu=False): for tf_op in ops_list: s = tf_op(tf_x, tf_indices, segment_indices, 10) with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"): s.eval() if __name__ == "__main__": tf.test.main()
{ "content_hash": "46abb9a9d0169b10f3666e1c43649567", "timestamp": "", "source": "github", "line_count": 503, "max_line_length": 80, "avg_line_length": 38.37972166998012, "alnum_prop": 0.5816627816627816, "repo_name": "awni/tensorflow", "id": "c094be4ac0ead6e26c744b2930edc219d9588f29", "size": "19983", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow/python/kernel_tests/segment_reduction_ops_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "156098" }, { "name": "C++", "bytes": "7765982" }, { "name": "CMake", "bytes": "29325" }, { "name": "CSS", "bytes": "1297" }, { "name": "HTML", "bytes": "684124" }, { "name": "Java", "bytes": "50361" }, { "name": "JavaScript", "bytes": "7188" }, { "name": "Jupyter Notebook", "bytes": "1771787" }, { "name": "Objective-C", "bytes": "1288" }, { "name": "Protocol Buffer", "bytes": "103762" }, { "name": "Python", "bytes": "4675299" }, { "name": "Shell", "bytes": "126103" }, { "name": "TypeScript", "bytes": "342627" } ], "symlink_target": "" }
from pox.core import core import pox.boot log = core.getLogger("tests") _first = True _tests = [] def _up (e): log.info("Starting") for test in _tests: log.info("Test %s", test) if pox.boot._do_import("tests." + test) is True: log.error("Test %s not found", test) return def launch (**kw): #__main__.cli = False # Disable CLI global _first if _first: core.addListenerByName("UpEvent", _up) _first = False for k in kw: if k not in _tests: _tests.append(k)
{ "content_hash": "8e0530b64a36105f99bd0ca4389e3a16", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 52, "avg_line_length": 19.615384615384617, "alnum_prop": 0.6019607843137255, "repo_name": "damomeen/pox-datapath", "id": "1b349a4bae72e1e36e2bd51e291906e758f0409b", "size": "1095", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "15247" }, { "name": "JavaScript", "bytes": "9135" }, { "name": "Python", "bytes": "1037929" }, { "name": "Shell", "bytes": "373" } ], "symlink_target": "" }
""" We create a singleton console instance at the module level or as an attribute of your top-level object. """ from rich.console import Console console = Console() printf = console.print
{ "content_hash": "e309deee63a08c56d28a7e91fd5db8ac", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 77, "avg_line_length": 21.11111111111111, "alnum_prop": 0.7578947368421053, "repo_name": "tanghaibao/jcvi", "id": "77a52cf26811a420093e1855e5cc1d7672f7e637", "size": "355", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "jcvi/utils/console.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Cython", "bytes": "10467" }, { "name": "Dockerfile", "bytes": "1150" }, { "name": "Makefile", "bytes": "445" }, { "name": "Python", "bytes": "2635155" } ], "symlink_target": "" }
#-----------------------------------------------------# # AI Final Project # # Front End # # # # # #-----------------------------------------------------# import os import random import shutil from features import * #import sklearn class IOclass: def __init__(self): self.authorName = raw_input("ENTER AUTHOR NAME: ") self.filename = raw_input("ENTER FILENAME FOR PASSAGE: ") self.authorsWorks = raw_input("ENTER DIRECTORY NAME FOR KNOWN WORKS ('none' to skip): ") def authorToVector(author): dir = os.path.dirname(os.path.realpath(__import__("__main__").__file__)) path = dir + '/bin/database/' path = path + author + '/' for filename in os.listdir(path): thisFile = open(path + filename, "r") #call feature vector code here! def movePassagesToDirectory(io): dir = os.path.dirname(os.path.realpath(__import__("__main__").__file__)) path = dir + '/input/' path = path + io.authorsWorks + '/' if not os.path.exists(path): print "ERROR: desired directory does not exist" return if not os.path.exists(dir + "/bin/database/" + io.authorName): os.makedirs(dir + "/bin/database/" + io.authorName) for filename in os.listdir(path): thisFile = open(path + filename, "r") for line in thisFile: # DO STUFF WITH INPUT DIRECTORY ----------------------- #print line # ----------------------------------------------------- # COPY INPUT DIRECTORY INTO DATABASE -------------------------- shutil.copy(path + filename, dir + "/bin/database/" + io.authorName + "/") # ------------------------------------------------------------- vector(path + filename, dir+"/bin/"+io.authorName+".txt") def main(): #Prompt user for file and directory name io = IOclass() #open input file filePath = "input/" + io.filename inputFile = open(filePath, "r") #FOR DEBUGGING (print input file data) ---------------------------------------- #for line in inputFile: # print line #------------------------------------------------------------------------------ if io.authorsWorks.lower() is 'none': print "no directory selected" else: 
movePassagesToDirectory(io) dir = os.path.dirname(os.path.realpath(__import__("__main__").__file__)) dir += "/bin/" featureList = [] classifierList = [] for filename in os.listdir(dir): if filename == "database": continue; authorFile = open(dir + filename, "r") for line in authorFile: authorVec = [] for feature in line.split(","): authorVec.append(float(feature)) featureList.append(authorVec) if filename == io.authorName + ".txt": classifierList.append(1) else: classifierList.append(0) inputVector = passageToFeature(filePath) print("Support Vector Machine:\n") from sklearn import svm print(" Creating...\n") train1 = svm.SVC(kernel='rbf') print(" Training...\n") train1.fit(featureList, classifierList) print(" Predicting...\n") result = train1.predict([inputVector]) if result ==0: print(" Result: "+"Forgery") else: print(" Result: "+"Legit") score=train1.score(featureList, classifierList) print(" Mean accuracy of the SVM (training set): "+str(score)+'\n') print("Nueral Network:\n") from pybrain.tools.shortcuts import buildNetwork print(" Creating...\n") from pybrain.structure import TanhLayer net = buildNetwork(len(featureList[0]), len(featureList[0])+1, 1, hiddenclass=TanhLayer) from pybrain.datasets import SupervisedDataSet #size= amount of features per feature vector ds = SupervisedDataSet(len(featureList[0]), 1) for item, classifier in zip(featureList,classifierList): ds.addSample(tuple(item),(classifier,)) print(" Training...\n") from pybrain.supervised.trainers import BackpropTrainer trainer = BackpropTrainer(net, ds) NUM_EPOCHS=100 for i in range(NUM_EPOCHS): error = trainer.train() error = trainer.train() print "Epoch: %d, Error: %7.4f" % (50, error) print(" Predicting...\n") result = net.activate(inputVector) print (" Result: "+str(result)[1:-1] + "% a forgery") if __name__ == '__main__': main()
{ "content_hash": "8f694ddefbfaff17330e16c4bab985bb", "timestamp": "", "source": "github", "line_count": 151, "max_line_length": 90, "avg_line_length": 27.066225165562916, "alnum_prop": 0.5989723513579642, "repo_name": "Behemyth/ForgeryML", "id": "ba43e8a37bb429c3b25b391b14f6e317aec8db60", "size": "4089", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "finalproject.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "5823" } ], "symlink_target": "" }
import cProfile def do_cprofile(func): def profiled_func(*args, **kwargs): profile = cProfile.Profile() try: profile.enable() result = func(*args, **kwargs) profile.disable() return result finally: profile.print_stats() return profiled_func try: from line_profiler import LineProfiler def do_profile(follow=[]): def inner(func): def profiled_func(*args, **kwargs): try: profiler = LineProfiler() profiler.add_function(func) for f in follow: profiler.add_function(f) profiler.enable_by_count() return func(*args, **kwargs) finally: profiler.print_stats() return profiled_func return inner except ImportError: def do_profile(follow=[]): "Helpful if you accidentally leave in production!" def inner(func): def nothing(*args, **kwargs): return func(*args, **kwargs) return nothing return inner
{ "content_hash": "1cc0dd9280ed5b493394a1200aa8c0af", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 58, "avg_line_length": 29.425, "alnum_prop": 0.5089209855564996, "repo_name": "hasadna/OpenTrain", "id": "e38f7f2091367aba945e52854fe3097e2428d88b", "size": "1177", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "webserver/opentrain/algorithm/ot_profiler.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "857" }, { "name": "CSS", "bytes": "22895" }, { "name": "JavaScript", "bytes": "276346" }, { "name": "Python", "bytes": "398492" }, { "name": "Shell", "bytes": "671" } ], "symlink_target": "" }
"Example showing how to add a column on a existing column" from tables import * class Particle(IsDescription): name = StringCol(16, pos=1) # 16-character String lati = Int32Col(pos=2) # integer longi = Int32Col(pos=3) # integer pressure = Float32Col(pos=4) # float (single-precision) temperature = Float64Col(pos=5) # double (double-precision) # Open a file in "w"rite mode fileh = openFile("add-column.h5", mode = "w") # Create a new group group = fileh.createGroup(fileh.root, "newgroup") # Create a new table in newgroup group table = fileh.createTable(group, 'table', Particle, "A table", Filters(1)) # Append several rows table.append([("Particle: 10", 10, 0, 10*10, 10**2), ("Particle: 11", 11, -1, 11*11, 11**2), ("Particle: 12", 12, -2, 12*12, 12**2)]) print "Contents of the original table:", fileh.root.newgroup.table[:] # close the file fileh.close() # Open it again in append mode fileh = openFile("add-column.h5", "a") group = fileh.root.newgroup table = group.table # Get a description of table in dictionary format descr = table.description._v_colObjects descr2 = descr.copy() # Add a column to description descr2["hot"] = BoolCol(dflt=False) # Create a new table with the new description table2 = fileh.createTable(group, 'table2', descr2, "A table", Filters(1)) # Copy the user attributes table.attrs._f_copy(table2) # Fill the rows of new table with default values for i in xrange(table.nrows): table2.row.append() # Flush the rows to disk table2.flush() # Copy the columns of source table to destination for col in descr: getattr(table2.cols, col)[:] = getattr(table.cols, col)[:] # Fill the new column table2.cols.hot[:] = [ row["temperature"] > 11**2 for row in table ] # Remove the original table table.remove() # Move table2 to table table2.move('/newgroup', 'table') # Print the new table print "Contents of the table with column added:", fileh.root.newgroup.table[:] # Finally, close the file fileh.close()
{ "content_hash": "6d895c4171a7cf06c78e167b5787e523", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 78, "avg_line_length": 29.112676056338028, "alnum_prop": 0.6729559748427673, "repo_name": "cpcloud/PyTables", "id": "670e2152cde79182d3b3d0012a4946014f2d2b69", "size": "2067", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "examples/add-column.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "273891" }, { "name": "JavaScript", "bytes": "3491" }, { "name": "Objective-C", "bytes": "1404" }, { "name": "Python", "bytes": "3374653" }, { "name": "Scala", "bytes": "138" }, { "name": "Shell", "bytes": "23442" } ], "symlink_target": "" }
from __future__ import absolute_import
from . import _graph as __graph
from ._graph import *
from .. import Configuration
from . import opt
from . opt import multicut
from . opt import lifted_multicut
from . opt import mincut
from . opt import minstcut

import numpy
from functools import partial
import types
import sys

__all__ = []
# Re-export everything from the compiled _graph extension and make each
# exported object report "nifty.graph" as its module (nicer reprs/docs).
for key in __graph.__dict__.keys():
    try:
        __graph.__dict__[key].__module__='nifty.graph'
    except:
        pass
    __all__.append(key)


UndirectedGraph.__module__ = "nifty.graph"


ilpSettings = multicut.ilpSettings


# multicut objective
# Attach the objective/contraction-graph types as attributes of the graph
# classes so callers can write e.g. UndirectedGraph.MulticutObjective.
UndirectedGraph.MulticutObjective = multicut.MulticutObjectiveUndirectedGraph
UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
EdgeContractionGraphUndirectedGraph.MulticutObjective = multicut.MulticutObjectiveEdgeContractionGraphUndirectedGraph

UndirectedGraph.MincutObjective = mincut.MincutObjectiveUndirectedGraph
UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
EdgeContractionGraphUndirectedGraph.MincutObjective = mincut.MincutObjectiveUndirectedGraph

# #minstcut objective
# UndirectedGraph.MinstcutObjective = minstcut.MinstcutObjectiveUndirectedGraph
# UndirectedGraph.EdgeContractionGraph = EdgeContractionGraphUndirectedGraph
# EdgeContractionGraphUndirectedGraph.MinstcutObjective = minstcut.MinstcutObjectiveEdgeContractionGraphUndirectedGraph

# lifted multicut objective
UndirectedGraph.LiftedMulticutObjective = lifted_multicut.LiftedMulticutObjectiveUndirectedGraph


def randomGraph(numberOfNodes, numberOfEdges):
    """Build an undirected graph with random edges (no self-loops).

    Random node pairs are drawn in bulk, self-loops are filtered out,
    and single edges are then added until exactly ``numberOfEdges``
    distinct edges exist.

    NOTE(review): numpy.random.randint's ``high`` bound is exclusive, so
    node ``numberOfNodes - 1`` never receives a random edge -- confirm
    whether that is intended.
    """
    g = UndirectedGraph(numberOfNodes)

    uv = numpy.random.randint(low=0, high=numberOfNodes-1, size=numberOfEdges*2)
    uv = uv.reshape([-1,2])

    # drop self-loops before the bulk insert
    where = numpy.where(uv[:,0]!=uv[:,1])[0]
    uv = uv[where,:]
    g.insertEdges(uv)
    # top up one edge at a time until the requested count is reached
    # (duplicates inserted above do not increase numberOfEdges)
    while( g.numberOfEdges < numberOfEdges):
        u,v = numpy.random.randint(low=0, high=numberOfNodes-1, size=2)
        if u != v:
            g.insertEdge(int(u),int(v))
    return g


class EdgeContractionGraphCallback(EdgeContractionGraphCallbackImpl):
    """Convenience base class for edge-contraction callbacks.

    Subclasses may define any of ``contractEdge``, ``mergeEdges``,
    ``mergeNodes`` or ``contractEdgeDone``; whichever are present are
    wired into the corresponding ``*Callback`` slots of the C++ impl.
    """
    def __init__(self):
        super(EdgeContractionGraphCallback, self).__init__()

        try:
            self.contractEdgeCallback = self.contractEdge
        except AttributeError:
            pass

        try:
            self.mergeEdgesCallback = self.mergeEdges
        except AttributeError:
            pass

        try:
            self.mergeNodesCallback = self.mergeNodes
        except AttributeError:
            pass

        try:
            self.contractEdgeDoneCallback = self.contractEdgeDone
        except AttributeError:
            pass

def edgeContractionGraph(g, callback):
    """Create the edge-contraction graph matching ``g``'s class, with
    ``callback`` notified of contractions/merges."""
    Ecg = g.__class__.EdgeContractionGraph
    ecg = Ecg(g, callback)
    return ecg


def undirectedGraph(numberOfNodes):
    """Create an empty UndirectedGraph with ``numberOfNodes`` nodes."""
    return UndirectedGraph(numberOfNodes)

def undirectedGridGraph(shape, simpleNh=True):
    """Create a 2D or 3D grid graph with the simple (direct-neighbor)
    neighborhood; other neighborhoods are not implemented."""
    if not simpleNh:
        raise RuntimeError("currently only simpleNh is implemented")
    s = [int(s) for s in shape]
    if(len(s) == 2):
        return UndirectedGridGraph2DSimpleNh(s)
    elif(len(s) == 3):
        return UndirectedGridGraph3DSimpleNh(s)
    else:
        raise RuntimeError("currently only 2D and 3D grid graph is exposed to python")

gridGraph = undirectedGridGraph

def drawGraph(graph, method='spring'):
    """Draw ``graph`` via networkx, labeling nodes with their ids."""
    import networkx

    G = networkx.Graph()
    for node in graph.nodes():
        G.add_node(node)

    for edge in graph.edges():
        u, v = graph.uv(edge)
        G.add_edge(u, v)

    nodeLabels = {node: str(node) for node in graph.nodes()}

    if method == 'spring':
        networkx.draw_spring(G, labels=nodeLabels)
    else:
        # NOTE(review): 'lables' looks like a typo for 'labels' -- as
        # written the keyword would not be recognized by networkx.draw.
        networkx.draw(G, lables=nodeLabels)


def run_label_propagation(graph, edge_values=None, nb_iter=1, local_edges=None, size_constr=-1, nb_threads=-1):
    """
    This function can be useful to obtain superpixels (alternative to WS superpixels for example).

    The usual label propagation algorithm (https://en.wikipedia.org/wiki/Label_propagation_algorithm) iterates
    over nodes of the graph in a random order: for every iteration and selected node u, the algorithm assigns u
    to the label occurring with the highest frequency among its neighbours (if there are multiple highest frequency
    labels, it selects a label at random). This process can be repeated multiple times (`nb_iter`) until the
    algorithm converges to a set of labels.

    This generalized implementation also supports signed edge values, so that node labels are not assigned
    to the neighboring label with higher frequency, but to the neighboring label with the highest positive
    edge interaction. By default, all edge values have weight +1 and the standard label propagation
    algorithm is performed.

    For example, a node with the following five-nodes neighborhood:

        - neighbor_1_label = 1, edge_weight = +2
        - neighbor_2_label = 1, edge_weight = +5
        - neighbor_3_label = 1, edge_weight = -2
        - neighbor_4_label = 2, edge_weight = -5
        - neighbor_5_label = 3, edge_weight = +5

    will be randomly assigned to label 1 or 3 (given they have equal maximum attraction +5).

    :param graph: undirected graph
    :param edge_values: Optional signed edge weights. By default, all edges have equal weight +1 and the standard
            label propagation algorithm is performed .
    :param nb_iter: How many label propagation iterations to perform (one iteration = one loop over all the nodes
            of the graph)
    :param local_edges: Boolean array indicating which edges are local edges in the graph. If specified, then the
            algorithm proceeds as following: any given node can be assigned to the label of a neighboring cluster
            only if this cluster has at least one local edge connection to the node.
    :param size_constr: Whether or not to set a maximum size for the final clusters. The default value is -1 and
            no size constraint is applied.
    :param nb_threads: When multiple threads are used, multiple nodes are processed in parallel.

    :return: Newly assigned node labels
    """
    nb_edges = graph.numberOfEdges
    # default: unit weights -> standard label propagation
    edge_values = numpy.ones((nb_edges,), dtype="float32") if edge_values is None else edge_values
    assert edge_values.shape[0] == nb_edges

    if local_edges is not None:
        assert edge_values.shape == local_edges.shape
        local_edges = numpy.require(local_edges, dtype='bool')
    else:
        # default: every edge counts as a local edge
        local_edges = numpy.ones_like(edge_values).astype('bool')

    # TODO: add support initial node_labels (need to specify initial cluster size)
    nb_nodes = graph.numberOfNodes
    # every node starts in its own singleton cluster
    node_labels = numpy.arange(0, nb_nodes)
    # if node_labels is None:
    #     node_labels = numpy.arange(0, nb_nodes)
    #     sizes = numpy.ones((nb_nodes,))
    # else:
    #     raise NotImplementedError()

    node_labels = numpy.require(node_labels, dtype='uint64')

    # the C++ impl updates node_labels in place
    runLabelPropagation_impl(graph, node_labels, edge_values, local_edges, nb_iter, size_constr, nb_threads)

    return node_labels


import numpy as np
import nifty.graph.rag as nrag

def accumulate_affinities_mean_and_length(affinities, offsets, labels, graph=None, affinities_weights=None,
                                          offset_weights=None, ignore_label=None, number_of_threads=-1):
    """
    Features of this function (additional ones compared to other accumulate functions):
      - does not require a RAG but simply a graph and a label image (can include long-range edges)
      - can perform weighted average of affinities depending on given affinitiesWeights
      - ignore pixels with ignore label

    Parameters
    ----------
    affinities: offset channels expected to be the first one
    """
    affinities = np.require(affinities, dtype='float32')
    if affinities_weights is not None:
        assert offset_weights is None, "Affinities weights and offset weights cannot be passed at the same time"
        affinities_weights = np.require(affinities_weights, dtype='float32')
    else:
        affinities_weights = np.ones_like(affinities)

        if offset_weights is not None:
            offset_weights = np.require(offset_weights, dtype='float32')
            # broadcast the per-offset weight over the spatial axes
            for _ in range(affinities_weights.ndim-1):
                offset_weights = np.expand_dims(offset_weights, axis=-1)
            affinities_weights *= offset_weights

    # move the offset channel from front to back, as expected by the impl
    affinities = np.rollaxis(affinities, axis=0, start=len(affinities.shape))
    affinities_weights = np.rollaxis(affinities_weights, axis=0, start=len(affinities_weights.shape))

    offsets = np.require(offsets, dtype='int32')
    assert len(offsets.shape) == 2

    if graph is None:
        graph = nrag.gridRag(labels)

    hasIgnoreLabel = (ignore_label is not None)
    ignore_label = 0 if ignore_label is None else int(ignore_label)
    number_of_threads = -1 if number_of_threads is None else number_of_threads

    edge_indicators_mean, edge_indicators_max, edge_sizes = \
        accumulateAffinitiesMeanAndLength_impl_(
            graph,
            labels.astype('uint64'),
            affinities,
            affinities_weights,
            offsets,
            hasIgnoreLabel,
            ignore_label,
            number_of_threads
        )
    # edge_indicators_max is computed by the impl but deliberately dropped
    return edge_indicators_mean, edge_sizes


def accumulate_affinities_mean_and_length_inside_clusters(affinities, offsets, labels, offset_weights=None,
                                                          ignore_label=None, number_of_threads=-1):
    """
    Similar idea to `accumulate_affinities_mean_and_length`, but accumulates affinities/edge-values
    for all edges not on cut (i.e. connecting nodes in the same cluster)
    """
    affinities = np.require(affinities, dtype='float32')
    # move the offset channel from front to back, as expected by the impl
    affinities = np.rollaxis(affinities, axis=0, start=len(affinities.shape))

    offsets = np.require(offsets, dtype='int32')
    assert len(offsets.shape) == 2

    if offset_weights is None:
        offset_weights = np.ones(offsets.shape[0], dtype='float32')
    else:
        offset_weights = np.require(offset_weights, dtype='float32')

    hasIgnoreLabel = (ignore_label is not None)
    ignore_label = 0 if ignore_label is None else int(ignore_label)
    number_of_threads = -1 if number_of_threads is None else number_of_threads

    edge_indicators_mean, edge_indicators_max, edge_sizes = \
        accumulateAffinitiesMeanAndLengthInsideClusters_impl_(
            labels.astype('uint64'),
            labels.max(),
            affinities,
            offsets,
            offset_weights,
            hasIgnoreLabel,
            ignore_label,
            number_of_threads
        )
    # edge_indicators_max is computed by the impl but deliberately dropped
    return edge_indicators_mean, edge_sizes
{ "content_hash": "9978e4171b6ad1ccbf436c94f1fe7e2e", "timestamp": "", "source": "github", "line_count": 297, "max_line_length": 125, "avg_line_length": 37.4006734006734, "alnum_prop": 0.666996759092546, "repo_name": "DerThorsten/nifty", "id": "7fef6626ed00bdd9afdf4780c00e049c2cb6c56f", "size": "11146", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/python/module/nifty/graph/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "3367" }, { "name": "C", "bytes": "337" }, { "name": "C++", "bytes": "2367327" }, { "name": "CMake", "bytes": "67270" }, { "name": "HTML", "bytes": "8" }, { "name": "Python", "bytes": "366763" }, { "name": "Shell", "bytes": "1245" }, { "name": "TeX", "bytes": "4055" } ], "symlink_target": "" }
from server import EventServer
import threading
import random
import time
import json
from message import MessageProtocol
from enum import Enum
import msgpack
import sys
import argparse

# Guards all cross-thread game state: the socket-server thread (handlers)
# and the main game-loop thread both touch clients/acks/stats.
lock = threading.Lock()


class PacketId(Enum):
    # Wire-level message type identifiers shared with the client.
    JOIN = 0
    WELCOME = 1
    ACK = 2
    HEARTBEAT = 3

    PLAYER_INFO = 10
    PLAYER_UPDATES = 11
    PLAYER_LEFT = 12

    PLAYER_INPUT = 20
    PLAYER_FIRE = 21

    WORLD_INFO = 30

    BULLETS = 35


class PacketProtocol(MessageProtocol):
    """msgpack wire format: [type, sequence_number, needs_ack(0/1), payload]."""

    def create(self, msg_type, payload, sequence_number=0, needs_ack=False):
        """Pack a message envelope; `payload` is an already-packed blob."""
        message = [msg_type.value, sequence_number, 1 if needs_ack else 0, payload]
        packed = msgpack.packb(message)
        return packed

    def parse(self, message):
        """Unpack an envelope and convert the raw type int to a PacketId."""
        unpacked = msgpack.unpackb(message)
        unpacked[0] = PacketId(int(unpacked[0]))
        return unpacked

    def pack_data(self, data):
        # NOTE(review): the `encoding=` kwarg was removed in msgpack >= 1.0;
        # this code presumably targets an older msgpack release -- confirm.
        return msgpack.packb(data, encoding='utf-8')

    def unpack_data(self, data):
        return msgpack.unpackb(data, encoding='utf-8')


class PlayerClient:
    """
    Server-side representation of every connected player.
    """
    def __init__(self, player_id, client_addr):
        self.uuid = player_id
        # Random display color (RGB floats in [0, 1]).
        self.color = (
            random.uniform(0.0, 1.0),
            random.uniform(0.0, 1.0),
            random.uniform(0.0, 1.0))
        # Random spawn position within the default world bounds.
        self.position = [
            random.uniform(-10.0, 10.0),
            random.uniform(-10.0, 10.0)]
        self.address = client_addr
        self.speed = 10

        # Player's current input, stored as <x> and <y> deltas
        self.movement = [0, 0]

        # which direction is the player "facing"
        # (aka, which way did he move last)
        self.facing = [1, 0]

    def set_movement(self, move):
        # Remember the facing direction only for non-zero movement, so
        # stopping keeps the last direction the player moved in.
        self.movement = move
        if move[0] != 0 or move[1] != 0:
            self.facing = move

    def as_dict(self):
        """
        This is the information about the player that gets
        sent around to everyone.

        Color, position and rotation data are transformed from floats
        to integers for passing. This loses an acceptable degree
        of accuracy.
        """
        data = {
            "uuid": self.uuid,
            "colorRed": int(self.color[0] * 255),
            "colorGreen": int(self.color[1] * 255),
            "colorBlue": int(self.color[2] * 255),
            "position": (int(self.position[0] * 1000), int(self.position[1] * 1000))
        }
        return data


class PacketInfo:
    """Bookkeeping for a sent packet that still awaits an ACK."""

    def __init__(self, seq_number, sent_at, target, event, payload):
        self.sent_ticks = sent_at
        self.sequence_number = seq_number

        # id of the target player
        self.target = target

        # this is so we can send it again if needed
        self.payload = payload
        self.event = event


class World:
    """
    Server-side representation of the game world.
    """
    def __init__(self, size=(20, 10)):
        # size of the world, in "units"
        self.width = size[0]
        self.height = size[1]

    def as_dict(self):
        return {
            "width": self.width,
            "height": self.height
        }


class Bullet:
    """
    Server-side representation of a bullet object.
    """
    def __init__(self, pos, direct, created_by):
        self.position = pos
        self.direction = direct
        self.speed = 8
        self.owner = created_by
        # seconds before the bullet expires
        self.lifetime = 2.0

    def as_dict(self):
        # NOTE(review): self.rotation is never assigned anywhere, so this
        # call raises AttributeError; it presumably should be derived from
        # self.direction -- confirm against the client's expectations.
        return {
            "position": [self.position[0] * 1000, self.position[1] * 1000],
            "rotation": self.rotation * 1000
        }


class GameServer:
    """UDP game server: runs the event server on a daemon thread and a
    fixed-tick game loop on the main thread."""

    def __init__(self, settings):
        # player_id -> PlayerClient, plus a reverse socket-address map
        self._clients = {}
        self._socket_to_player = {}
        self._clients_to_remove = []
        self._player_id_number = 0

        # Binding Address
        self._server_address = (settings.host, int(settings.port))
        self._socket_server = None
        self._server_thread = None

        # outgoing packet sequencing; wraps at _max_sequence_number
        self._sequence_number = 0
        self._max_sequence_number = 10000
        # PacketInfo entries waiting for an ACK, oldest first
        self._ack_needed = []

        self.protocol = PacketProtocol()

        # game state stuff
        self._world = World()
        self._bullets = []

        # game tick rate, in frames per second.
        self._tick_rate = int(settings.tickRate)

        # stats (reported every _stat_time seconds)
        self._stat_timer = 5
        self._stat_time = 5
        self._stat_sent = 0
        self._stat_sent_bandwidth = 0

    def start(self):
        """Wire up the handlers, start the socket server thread, then
        run the fixed-tick game loop forever."""
        self._socket_server = EventServer(self._server_address)
        self._socket_server.heartbeat_rate = 35
        self._socket_server._message_protocol = PacketProtocol()

        # set up handlers
        self._socket_server.on('connected', self.client_connected)
        self._socket_server.on('disconnected', self.client_disconnected)
        self._socket_server.on(PacketId.JOIN, self.player_join)
        self._socket_server.on(PacketId.PLAYER_INPUT, self.player_movement)
        self._socket_server.on(PacketId.ACK, self.received_ack)
        self._socket_server.on(PacketId.PLAYER_FIRE, self.player_fire)
        self._socket_server.on(PacketId.HEARTBEAT, self.received_heartbeat)

        self._server_thread = threading.Thread(target=self._socket_server.serve_forever)
        self._server_thread.daemon = True
        self._server_thread.start()
        print("Serving on {}".format(self._socket_server.socket.getsockname()))

        # for a fixed update tick
        last_time = time.time()
        loop_time = 1.0 / self._tick_rate
        loop_timer = 0
        while True:
            time_now = time.time()
            delta = time_now - last_time
            last_time = time_now
            loop_timer += delta
            if loop_timer >= loop_time:
                if loop_timer >= loop_time * 1.5:
                    print("----------------------")
                    print("FRAMERATE DROPPED TO {}fps".format((1.0 / loop_timer)))
                    print("----------------------")
                self.game_loop(loop_timer)
                loop_timer = 0

        # NOTE(review): unreachable -- the loop above never breaks.
        self._socket_server.shutdown()

    def next_player_id(self):
        # Player ids are a simple monotonically increasing counter.
        self._player_id_number += 1
        return self._player_id_number

    def next_sequence_number(self):
        """Return the current sequence number and advance it, wrapping
        back to 0 past _max_sequence_number."""
        this_seq = self._sequence_number
        if self._sequence_number < self._max_sequence_number:
            self._sequence_number += 1
        else:
            print("SEQUENCE NUMBER LOOPED")
            self._sequence_number = 0
        return this_seq

    def send(self, player_id, event, payload, needs_ack=False, seq_num=None):
        """Send one packet to one player, optionally tracking it for ACK.

        `payload` must already be packed via protocol.pack_data().
        """
        if player_id not in self._clients:
            return
        # NOTE(review): `if not seq_num` also regenerates when an explicit
        # seq_num of 0 is passed in (0 is falsy) -- confirm intended.
        if not seq_num:
            seq_num = self.next_sequence_number()
        self._stat_sent += 1
        player = self._clients[player_id]
        player_addr = player.address
        msg_bytes = self.protocol.create(event, payload, seq_num, needs_ack)
        if needs_ack:
            info = PacketInfo(seq_num, time.time(), player_id, event, payload)
            # print("new ACK for {} at time: {}".format(seq_num, info.sent_ticks))
            self._ack_needed.append(info)
        self._stat_sent_bandwidth += sys.getsizeof(msg_bytes)
        self._socket_server.sendto(player_addr, msg_bytes)

    def send_all(self, event, payload, needs_ack=False):
        """Sends the message to all active players."""
        for player_id, player in self._clients.items():
            self.send(player_id, event, payload, needs_ack)

    def game_loop(self, dt):
        """One fixed tick: report stats, reap disconnected players, move
        players and bullets, broadcast updates, and resend stale ACKs."""
        updated_players = []

        # periodic throughput/bandwidth stats
        self._stat_timer -= dt
        if self._stat_timer <= 0:
            self._stat_timer = self._stat_time
            with lock:
                sent = self._stat_sent
                self._stat_sent = 0
                avg = sent / self._stat_time
                print("AVG MESSAGES SENT PER SECOND: {}".format(avg))
                band = self._stat_sent_bandwidth
                self._stat_sent_bandwidth = 0
                avg = band / self._stat_time
                amnt = "bytes"
                if avg > 1000000:
                    avg /= 1000
                    avg /= 1000
                    amnt = "megabytes"
                elif avg > 10000:
                    avg /= 1000
                    amnt = "kilobytes"
                print("AVG BANDWIDTH SENT PER SECOND: {} {}".format(avg, amnt))
                if len(self._clients) > 0:
                    avg = (sent / self._stat_time) / len(self._clients)
                    print("AVG MESSAGES PER PLAYER: {}".format(avg))

        with lock:
            # remove disconnected players
            for player_id in self._clients_to_remove:
                if player_id not in self._clients:
                    continue
                player = self._clients[player_id]
                # send player_left message to everyone else
                self.send_all(PacketId.PLAYER_LEFT, self.protocol.pack_data(player.uuid))
                del self._clients[player_id]
            self._clients_to_remove.clear()

            # loop through players and handle updates
            for player_id, player in self._clients.items():
                if player.movement[0] != 0 or player.movement[1] != 0:
                    player.position[0] += player.movement[0] * player.speed * dt
                    player.position[1] += player.movement[1] * player.speed * dt
                    # clamp to world bounds, leaving a 1-unit margin
                    if player.position[0] <= -self._world.width + 1:
                        player.position[0] = -self._world.width + 1
                    elif player.position[0] >= self._world.width - 1:
                        player.position[0] = self._world.width - 1
                    if player.position[1] < -self._world.height + 1:
                        player.position[1] = -self._world.height + 1
                    elif player.position[1] >= self._world.height - 1:
                        player.position[1] = self._world.height - 1
                    updated_players.append(player.as_dict())

            if len(updated_players) > 0:
                # print("sending player updates for {} players".format(len(updated_players)))
                self.send_all(PacketId.PLAYER_UPDATES, self.protocol.pack_data(updated_players))

            # update bullets
            dead_bullets = []
            bullet_update = []
            for bullet in self._bullets:
                bullet.lifetime -= dt
                if bullet.lifetime <= 0:
                    dead_bullets.append(bullet)
                    continue
                bullet.position[0] += bullet.direction[0] * bullet.speed * dt
                bullet.position[1] += bullet.direction[1] * bullet.speed * dt
                bullet_update.append(bullet.as_dict())

            # remove dead bullets
            for bullet in dead_bullets:
                self._bullets.remove(bullet)

            # send bullet updates if some were updated or removed
            if len(bullet_update) > 0 or len(dead_bullets) > 0:
                self.send_all(PacketId.BULLETS, self.protocol.pack_data(bullet_update))

            # loop through the Acks queue to see if we need to send more acks
            if len(self._ack_needed):
                resend_acks = []
                while len(self._ack_needed) > 0:
                    ack = self._ack_needed[0]
                    since = time.time() - ack.sent_ticks
                    if since >= 2:
                        # resend and requeue
                        # print("ACK needed for {}".format(ack.sequence_number))
                        # NOTE(review): pop() removes from the END of the
                        # list while the age check reads the FRONT -- this
                        # looks like it should be pop(0); as written, young
                        # packets get resent whenever the oldest is stale.
                        resend_acks.append(self._ack_needed.pop())
                    else:
                        # hit a young pack, quit for now
                        # oldest packs will be at the front
                        break
                for ack in resend_acks:
                    self.send(ack.target, ack.event, ack.payload, True, ack.sequence_number)

    def sequence_more_recent(self, s1, s2):
        # Wrap-around-aware "is s1 newer than s2" comparison.
        return (s1 > s2 and s1 - s2 <= self._max_sequence_number / 2) or (s2 > s1 and s2 - s1 > self._max_sequence_number/2)

    def player_join(self, msg, socket):
        # JOIN is currently a no-op: players are created in client_connected.
        pass

    def client_connected(self, msg, socket):
        """
        Both 'connected' and 'disconnected' are events reserved
        by the server. It will call them automatically.
        """
        with lock:
            player = PlayerClient(self.next_player_id(), socket)
            print("New client: {} is now player {}".format(socket, player.uuid))
            self._clients[player.uuid] = player
            self._socket_to_player[socket] = player.uuid

            # send welcome
            # print(player.as_dict())
            self.send(player.uuid, PacketId.WELCOME, self.protocol.pack_data(player.as_dict()), True)

            # send world, require acknowledge
            self.send(player.uuid, PacketId.WORLD_INFO, self.protocol.pack_data(self._world.as_dict()), True)

    def client_disconnected(self, msg, socket):
        # Queue the player for removal; the game loop does the actual
        # deletion and broadcasts PLAYER_LEFT.
        player = self._clients[self._socket_to_player[socket]]
        print("Player {} has disconnected.".format(player.uuid))
        if player.uuid in self._clients and player.uuid not in self._clients_to_remove:
            self._clients_to_remove.append(player.uuid)

    def player_movement(self, msg, socket):
        """Handle a PLAYER_INPUT packet: store the new movement deltas."""
        if socket not in self._socket_to_player:
            return
        player = self._clients[self._socket_to_player[socket]]
        movement = self.protocol.unpack_data(msg)
        # print("Got player input for {}: {}".format(player.uuid, movement))
        player.set_movement(movement)

    def player_fire(self, msg, socket):
        """Handle a PLAYER_FIRE packet: spawn a bullet at the player's
        position, travelling in the direction they are facing."""
        if socket not in self._socket_to_player:
            return
        player = self._clients[self._socket_to_player[socket]]
        # create bullet
        bullet = Bullet(player.position, player.facing, player.uuid)
        self._bullets.append(bullet)

    def received_heartbeat(self, msg, socket):
        # Heartbeats only exist to keep the connection alive.
        pass

    def received_ack(self, msg, socket):
        """Handle an ACK packet: drop every acknowledged entry from the
        pending-ACK queue."""
        if socket not in self._socket_to_player:
            return
        acks = self.protocol.unpack_data(msg)
        with lock:
            for ack in acks:
                ackInfo = next((a for a in self._ack_needed if a.sequence_number == ack), None)
                # NOTE(review): `ackInfo and ackInfo` is redundant -- a plain
                # `if ackInfo:` would behave identically.
                if ackInfo and ackInfo:
                    # print("ack received: {}".format(ackInfo.sequence_number))
                    self._ack_needed.remove(ackInfo)


ARGS = argparse.ArgumentParser(description="Example Game Server")

ARGS.add_argument(
    '--host', action="store", dest="host",
    default="",
    help="Address to bind to."
)
ARGS.add_argument(
    '--port', action="store", dest="port",
    default='9999', help='What port to bind to.')
ARGS.add_argument(
    '--tickRate', action="store", dest="tickRate",
    default="60",
    help="Tick rate of the game loop in frames per second."
)

if __name__ == "__main__":
    args = ARGS.parse_args()
    game = GameServer(args)
    game.start()
{ "content_hash": "89fcd2061de03ab04c7a558b077a75a4", "timestamp": "", "source": "github", "line_count": 436, "max_line_length": 124, "avg_line_length": 34.53211009174312, "alnum_prop": 0.5498140276301806, "repo_name": "lxndrdagreat/udp-socket-server", "id": "5d02ed4f0d34c5db2fb115cbb0c3821fe52fb0b7", "size": "15170", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "example_game_server.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "27227" } ], "symlink_target": "" }
# Provisioning script (Python 2 / boto v2): builds a VPC with two subnets in
# two availability zones, an internet gateway and route table, a security
# group, an SSH keypair, and two EC2 web servers which it then configures
# over SSH and smoke-tests with curl.  The script is idempotent-ish: it
# reuses existing VPC/subnets when their CIDRs already exist.
import boto
import boto.rds2
import os, subprocess, time
from boto.manage.cmdshell import sshclient_from_instance
from boto.vpc import VPCConnection

vpcc = VPCConnection()

# ********** Configuration Constant ********** #
BOOLEAN_DRYRUN = True
# NOTE(review): the line below immediately overrides the dry-run flag above.
BOOLEAN_DRYRUN = False
PROJECT_TAG = "Stratus"
ACCESS_KEY = ""
SECRET_KEY = ""
REGION_NAME = "us-east-1"
AVAIL_ZONE1 = REGION_NAME + 'b'
AVAIL_ZONE2 = REGION_NAME + 'c'
AMI_IMAGE = "ami-60b6c60a"  # Linux AMI 2015.09.1 (HVM), SSD; us-east
VPC_CIDR = "10.0.2.0/24"
VPC_NAME = "New VPC"
VPC_SUBNET1 = "10.0.2.0/25"
VPC_SUBNET2 = "10.0.2.128/25"
VPC_TENANCY = "default"
INSTANCE_SIZE = "t2.micro"
DEFAULT_USER = "ec2-user"
SG_PREFIX = "Security Tag - Stratus"
KEY_DIR = "/home/"
KEY_NAME = "StratusKeypair.pem"
# ********************

'''
# ********** User interactive **********
print "Type CIDR block to use between /16 and /28. E.G. 10.0.0.0/24, Press 'Enter' when finished"
VPC_CIDR = raw_input()
print "Type the Name for VPC (to use as Tag), Press 'Enter' when finished"
VPC_NAME = raw_input()
'''

#boto.set_stream_logger("Stratus")
print "Boto Version: ", boto.Version, "\n"
# print boto.rds2.regions()

# Create the VPC unless one with the same CIDR already exists.
exist_vpc = vpcc.get_all_vpcs(filters = [("cidrBlock", VPC_CIDR)])

if not len(exist_vpc):
    new_vpc = vpcc.create_vpc(cidr_block = VPC_CIDR, instance_tenancy = VPC_TENANCY, dry_run = BOOLEAN_DRYRUN)
    aws_vpc_id = new_vpc.id
    print "No existing VPC"
    print "New VPC created: ", aws_vpc_id, "\n"
else:
    # Return 1st object from list
    # NOTE(review): the id is recovered by slicing the object's repr
    # (str(vpc) looks like "VPC:vpc-xxxx") — fragile but works with boto v2.
    aws_vpc_id = str(exist_vpc.pop(0))[4:]
    print "Identical CIDR already used. Skipped creation."
    print "Existing VPC ID: {}\n".format(aws_vpc_id)
    # Use existing VPC
    new_vpc = vpcc.get_all_vpcs(filters = [("cidrBlock", VPC_CIDR)])

    '''
    # Doesn't work due to VPC dependency
    print "Requested VPC already exists! Will attempt to delete vpc and recreate"
    del_status = vpcc.delete_vpc(aws_vpc_id)
    print "Deletion Completed", del_status
    '''

# Create (or reuse) the two /25 subnets, one per availability zone.
exist_subnet1 = vpcc.get_all_subnets(filters = [("cidrBlock", VPC_SUBNET1)])
exist_subnet2 = vpcc.get_all_subnets(filters = [("cidrBlock", VPC_SUBNET2)])

if not len(exist_subnet1):
    print "Creating new subnet ..."
    new_subnet1 = vpcc.create_subnet(aws_vpc_id, VPC_SUBNET1, AVAIL_ZONE1, dry_run = BOOLEAN_DRYRUN)
    subnet1_id = new_subnet1.id
    print "New subnet 1 ID: {}\n".format(subnet1_id)
else:
    print "Subnet with {} already exists. Skipped creation".format(VPC_SUBNET1)
    # Same repr-slicing trick as for the VPC id ("Subnet:subnet-xxxx").
    subnet1_id = str(exist_subnet1.pop(0))[7:]
    print "Existing subnet 1 ID: {}\n".format(subnet1_id)

if not len(exist_subnet2):
    print "Creating new subnet2 ..."
    new_subnet2 = vpcc.create_subnet(aws_vpc_id, VPC_SUBNET2, AVAIL_ZONE2, dry_run = BOOLEAN_DRYRUN)
    subnet2_id = new_subnet2.id
    print "New subnet 2 ID: {}\n".format(subnet2_id)
else:
    print "Subnet with {} already exists. Skipped creation".format(VPC_SUBNET2)
    subnet2_id = str(exist_subnet2.pop(0))[7:]
    print "Existing subnet 2 ID: {}\n".format(subnet2_id)

# ********** Connections **********
def connect():
    """Open region-scoped EC2, RDS and VPC connections.

    Credentials come from the boto environment/config; the explicit-key
    variants are kept commented out below for reference.
    """
    ec2_connect = boto.ec2.connect_to_region(REGION_NAME)
    rds_connect = boto.rds2.connect_to_region(REGION_NAME)
    vpc_connect = boto.vpc.connect_to_region(REGION_NAME)
    # ec2_connect = boto.ec2.connect_to_region(REGION_NAME,
    #     aws_access_key_id = ACCESS_KEY, aws_secret_access_key = SECRET_KEY)
    # rds_connect = boto.rds2.connect_to_region(REGION_NAME,
    #     aws_access_key_id = ACCESS_KEY, aws_secret_access_key = SECRET_KEY)
    # vpc_connect = boto.vpc.connect_to_region(REGION_NAME,
    #     aws_access_key_id = ACCESS_KEY, aws_secret_access_key = SECRET_KEY)
    return ec2_connect, rds_connect, vpc_connect

ec2_connect, rds_connect, vpc_connect = connect()
# ********************

# ********** VPC Block **********
print "********** VPC Block **********\n"
#print "VPC Access to Zone:", vpc_connect.get_all_zones()

# Enable DNS so instances get resolvable public hostnames.
vpc_connect.modify_vpc_attribute(aws_vpc_id, enable_dns_support = True)
vpc_connect.modify_vpc_attribute(aws_vpc_id, enable_dns_hostnames = True)

# Create Routing Table
new_route_table = vpc_connect.create_route_table(aws_vpc_id, dry_run = BOOLEAN_DRYRUN)

try:
    vpc_connect.associate_route_table(new_route_table.id, subnet1_id)
except Exception as e:
    print "Alert: {}.\n".format(e.message)
else:
    print "Subnet1 is successfully associated with Route Table.\n"

try:
    vpc_connect.associate_route_table(new_route_table.id, subnet2_id)
except Exception as e:
    print "Alert: {}.\n".format(e.message)
else:
    print "Subnet2 is successfully associated with Route Table.\n"

try:
    inet_gateway = vpc_connect.create_internet_gateway(dry_run=BOOLEAN_DRYRUN)
except Exception as e:
    print "GW Create Alert: {}.\n".format(e.message)
else:
    print "New Internet Gateway created: {}\n".format(inet_gateway)

try:
    vpc_connect.attach_internet_gateway(inet_gateway.id, aws_vpc_id)
except Exception as e:
    print "GW Attach Alert: {}.\n".format(e.message)
else:
    print "Attach GW to VPC: Success\n"

# To-do: check for existing IGW to use as ID instead
try:
    # Default route sending all non-local traffic out of the internet gateway.
    inet_gw_route_status = vpc_connect.create_route(
        new_route_table.id, destination_cidr_block="0.0.0.0/0",
        gateway_id=inet_gateway.id, dry_run=BOOLEAN_DRYRUN
    )
except Exception as e:
    print "GW Route Alert: {}.\n".format(e.message)
else:
    print "Default Internet Route for Gateway created: Success\n"

exist_sec_group = ec2_connect.get_all_security_groups(filters=[("group-name", SG_PREFIX)])
print "Exist SG: ", exist_sec_group

if not len(exist_sec_group):
    try:
        sec_group = ec2_connect.create_security_group(
            name=SG_PREFIX, description="Security Group for " + PROJECT_TAG,
            vpc_id=aws_vpc_id, dry_run=BOOLEAN_DRYRUN
        )
    except Exception as e:
        print "Alert: {}.\n".format(e.message)
    else:
        sec_group_id = sec_group.id
        print "SG ID: ", sec_group_id
        print "Security Group: {} created.\n".format(sec_group)
        # Open SSH, HTTP and HTTPS to the world.
        sec_group.authorize(
            ip_protocol='tcp', from_port=22, to_port=22, cidr_ip="0.0.0.0/0")
        sec_group.authorize(
            ip_protocol='tcp', from_port=80, to_port=80, cidr_ip="0.0.0.0/0")
        sec_group.authorize(
            ip_protocol='tcp', from_port=443, to_port=443, cidr_ip="0.0.0.0/0")
else:
    # I can't find a way to get Security Group ID so have to delete & recreate; becuase delete doesn't work
    # yet so have to create. This will result in error later.
    # NOTE(review): in this branch sec_group_id is never assigned, so the
    # later modify_instance_attribute calls will raise NameError.
    sec_group_name = str(exist_sec_group.pop(0))[14:]
    print "Name to delete: ", sec_group_name
    # To-do: Below does NOT work
    # ec2_connect.delete_security_group(name = sec_group_name)
    # Re-create etc.
# ********************

# ********** SSH KEY Block **********
print "********** SSH KEY Block **********\n"

# Create Key Pair on the account
try:
    new_key_pair = ec2_connect.create_key_pair(PROJECT_TAG+"Keypair", dry_run = BOOLEAN_DRYRUN)
except Exception as e:
    # Keypair probably exists already; fall back to the known name.
    print "Alert: {}.\n".format(e.message)
    new_key_pair_id = PROJECT_TAG + "Keypair"
else:
    print "SSH Keypair: {} created.\n".format(new_key_pair)
    new_key_pair_id = new_key_pair.name
    # Try to save the new key
    try:
        new_key_pair.save(KEY_DIR)
    except Exception as e:
        print "Alert: {}.\n".format(e.message)
        print "SSH Key is NOT saved!\n"
    else:
        print "SSH Keypair saved in {}\n".format(KEY_DIR)
        # SSH refuses keys that are group/world readable.
        cmd = "sudo chmod 400 " + KEY_DIR + KEY_NAME
        os.system(cmd)
# ********************

# ********** EC2 Block **********
# http://www.saltycrane.com/blog/2010/03/how-list-attributes-ec2-instance-python-and-boto/
print "********** EC2 Block **********\n"

try:
    # Attach a public IP at launch via an explicit network-interface spec.
    subnet1_interface_Spec = boto.ec2.networkinterface.NetworkInterfaceSpecification(
        subnet_id = subnet1_id, associate_public_ip_address = True)
    # Got some error later; so had to remove this:
    # groups = sec_group_id,
    subnet1_interface = boto.ec2.networkinterface.NetworkInterfaceCollection(subnet1_interface_Spec)
except Exception as e:
    print "\nAlert: {}.\n".format(e.message)
else:
    print "Interface for subnet 1 prepared.\n"

try:
    subnet2_interface_Spec = boto.ec2.networkinterface.NetworkInterfaceSpecification(
        subnet_id = subnet2_id, associate_public_ip_address = True)
    subnet2_interface = boto.ec2.networkinterface.NetworkInterfaceCollection(subnet2_interface_Spec)
except Exception as e:
    print "\nAlert: {}.\n".format(e.message)
else:
    print "Interface for subnet 2 prepared.\n"

# Launch one instance per subnet/availability zone.
ec2_reserve1 = ec2_connect.run_instances (
    image_id = AMI_IMAGE,
    key_name = new_key_pair_id,
    instance_type = INSTANCE_SIZE,
    placement = AVAIL_ZONE1,
    network_interfaces = subnet1_interface,
    dry_run = BOOLEAN_DRYRUN)

ec2_reserve2 = ec2_connect.run_instances (
    image_id = AMI_IMAGE,
    key_name = new_key_pair_id,
    instance_type = INSTANCE_SIZE,
    placement = AVAIL_ZONE2,
    network_interfaces = subnet2_interface,
    dry_run = BOOLEAN_DRYRUN)

# Got this error message:
# Network interfaces and an instance-level subnet ID may not be specified on the same request
# So had to remove this:
# subnet_id = subnet1_id, security_group_ids = [sec_group_id],
# Also no need for subnet info as that's already specified in the subnet interface setting
# Ref. https://github.com/aws/aws-sdk-php/issues/231
# Ref. https://github.com/aws/aws-cli/issues/518
# Ref. http://stackoverflow.com/questions/19029588/how-to-auto-assign-public-ip-to-ec2-instance-with-boto

# Poll until each instance reaches the "running" state.
ec2_instance1 = ec2_reserve1.instances[0]
print "Wait for instance1 to be running:\n"
while ec2_instance1.state != "running":
    print ". ",
    time.sleep(4)
    ec2_instance1.update()
print "Running!\n"

ec2_instance2 = ec2_reserve2.instances[0]
print "Wait for instance2 to be running:\n"
while ec2_instance2.state != "running":
    print ". ",
    time.sleep(4)
    ec2_instance2.update()
print "Running!\n"

ec2_instance1_id = ec2_instance1.id
ec2_instance2_id = ec2_instance2.id
ec2_instance1_dns = ec2_instance1.public_dns_name
ec2_instance2_dns = ec2_instance2.public_dns_name

# https://groups.google.com/forum/#!topic/boto-users/j_CfsT-o19U
# Attach the security group after launch (couldn't be set at run_instances).
ec2_connect.modify_instance_attribute(ec2_instance1_id, "groupSet", [sec_group_id])
ec2_connect.modify_instance_attribute(ec2_instance2_id, "groupSet", [sec_group_id])

# Retry SSH until sshd is up, then install and start the web stack.
bool_connection = False
while bool_connection == False:
    print "Attempt to connect Instance 1..."
    try:
        ssh_connect = sshclient_from_instance(instance = ec2_instance1,
            ssh_key_file = KEY_DIR + KEY_NAME, user_name = DEFAULT_USER)
    except Exception as e:
        print "Alert: {}.\n".format(e.message)
        print "Waiting for SSH service...",
        time.sleep(5)  # Wait for SSH service
    else:
        print "Connection to instance1 is successful\n"
        time.sleep(2)
        # Relax the sudoers tty requirement so non-interactive sudo works.
        ssh_connect.run_pty("sudo sed -i \'s/requiretty/!requiretty/\' /etc/sudoers")
        time.sleep(4)
        print ssh_connect.run("sudo yum update -y; sudo yum groupinstall -y \"Web Server\" \"PHP Support\"; sudo yum install -y php-mysql php-xml php-mbstring php-gd; sudo service httpd start; sudo chkconfig httpd on")
        bool_connection = True
        print "\n\n ********************* Web Server Installation completed *********************\n\n"

bool_connection = False
while bool_connection == False:
    print "Attempt to connect to Instance 2..."
    try:
        ssh_connect = sshclient_from_instance(instance = ec2_instance2,
            ssh_key_file = KEY_DIR + KEY_NAME, user_name = DEFAULT_USER)
    except Exception as e:
        print "Alert: {}.\n".format(e.message)
        print "Waiting for SSH service...",
        time.sleep(5)  # Wait for SSH service
    else:
        print "Connection to instance2 is successful\n"
        time.sleep(2)
        ssh_connect.run_pty("sudo sed -i \'s/requiretty/!requiretty/\' /etc/sudoers")
        time.sleep(4)
        print ssh_connect.run("sudo yum update -y; sudo yum groupinstall -y \"Web Server\" \"PHP Support\"; sudo yum install -y php-mysql php-xml php-mbstring php-gd; sudo service httpd start; sudo chkconfig httpd on")
        bool_connection = True
        print "\n\n ********************* Web Server Installation completed *********************\n\n"

# ************************* Web Service Test ********************************
# Smoke-test each web server from this machine with a 3-second curl timeout.
try:
    print "\n\n *************** Testing Web Service 1: ", ec2_instance1_dns, "\n\n"
    time.sleep(5)
    os.system("curl -m 3 " + ec2_instance1_dns)
except Exception as e:
    print "Alert: {}.\n".format(e.message)
else:
    print "\n\n ****************Web Service 1 testing completed. ****************\n"

try:
    print "\n\n *************** Testing Web Service 2: ", ec2_instance2_dns, "\n\n"
    time.sleep(5)
    os.system("curl -m 3 " + ec2_instance2_dns)
except Exception as e:
    print "Alert: {}.\n".format(e.message)
else:
    print "\n\n ****************Web Service 2 testing completed. ****************\n"
{ "content_hash": "642876781ab3bb5a4866c71856eb4169", "timestamp": "", "source": "github", "line_count": 348, "max_line_length": 218, "avg_line_length": 37.81896551724138, "alnum_prop": 0.6421244586277638, "repo_name": "AnthonyWC/aws_boto", "id": "ba3bb068bc325e3dd1f8400840ea7a20d857b492", "size": "13302", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "deploy.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "13302" } ], "symlink_target": "" }
import socket TCP_IP = '127.0.0.1' TCP_PORT = 5005 BUFFER_SIZE = 1024 MESSAGE = "i am sean" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((TCP_IP, TCP_PORT)) s.send(MESSAGE) data = s.recv(BUFFER_SIZE) s.close() print "received data:", data
{ "content_hash": "a8df2b86a9826c00194c0abd3b538b9e", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 53, "avg_line_length": 17.466666666666665, "alnum_prop": 0.6908396946564885, "repo_name": "sean666888/The-Bomb", "id": "9bb0c77cc343af8ba059612dbe86dbb64514a4e3", "size": "285", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "c.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1595" } ], "symlink_target": "" }
from flask import Flask, request, send_from_directory, make_response, redirect app = Flask(__name__, static_url_path='') @app.before_request def before_request(): authenticatedMethods = ['/list', '/another'] session_id = request.cookies.get('session_id') if session_id is None and any(request.path in s for s in authenticatedMethods): response = make_response(redirect('/error')) response.set_cookie('session_id', '', expires=0) return response @app.route("/error") def error(): return '{"error":"unauthorized request"}' @app.route("/login") def login(): response = make_response(redirect('/login2')) response.set_cookie('session_id', '123123123') return response @app.route("/login2") def login2(): return '{"isLogged":"true", "message": "Welcome", "user": "John Doe"}' @app.route("/list") def list(): return '[{"id":"1","name":"aaa"},{"id":"2","name":"vvv"},{"id":"3","name":"qqq"},{"id":"4","name":"xxx"}]' @app.route('/js/<path:path>') def send_js(path): return send_from_directory('js', path) @app.route('/css/<path:path>') def send_css(path): return send_from_directory('css', path) @app.route('/image/<path:path>') def send_image(path): return send_from_directory('image', path) @app.route('/html/<path:path>') def send_html(path): return send_from_directory('html', path) if __name__ == "__main__": app.run()
{ "content_hash": "601b24c1689401eccd15d505cc5ed9c0", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 110, "avg_line_length": 24.033898305084747, "alnum_prop": 0.6283497884344147, "repo_name": "canmogol/LightGap", "id": "74a39b733384d53b56b0727be3d9035c00d04b26", "size": "1418", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web/server.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "18085" }, { "name": "HTML", "bytes": "10598" }, { "name": "JavaScript", "bytes": "85712" }, { "name": "PHP", "bytes": "1512" }, { "name": "Python", "bytes": "1366" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Institution', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('added_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('name', models.CharField(max_length=255)), ('notes', models.TextField(null=True, blank=True)), ('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Resolver', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('added_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('endpoint', models.URLField(help_text=b'The address to which CoINS metadata will be appended to create an OpenURL link.', max_length=1000)), ('notes', models.TextField(null=True, blank=True)), ('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)), ('belongs_to', models.OneToOneField(related_name='resolver', to='openurl.Institution', on_delete=models.CASCADE)), ], options={ 'abstract': False, }, ), ]
{ "content_hash": "856766d03b57e54ca03fed3bc8d310a5", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 157, "avg_line_length": 42.86046511627907, "alnum_prop": 0.5746066196418882, "repo_name": "upconsulting/IsisCB", "id": "4e73ec05e83ca67dac6b314a03dc361f8a8fed35", "size": "1867", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "isiscb/openurl/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "34013" }, { "name": "Dockerfile", "bytes": "420" }, { "name": "HTML", "bytes": "1182137" }, { "name": "JavaScript", "bytes": "221954" }, { "name": "Less", "bytes": "67102" }, { "name": "Procfile", "bytes": "88" }, { "name": "Python", "bytes": "1758838" }, { "name": "Roff", "bytes": "285" }, { "name": "SCSS", "bytes": "67969" }, { "name": "Shell", "bytes": "12632" } ], "symlink_target": "" }
import tkinter as tk
import DanceAnno_InputDialog


class PlotAnnotation(tk.Tk):
    """ Plot annotation lines.
        Two levels of annotation are supported, A and B.
        Each level is denoted by an instance of this class with different levelId
    """
    # NOTE(review): this class subclasses tk.Tk but never calls
    # super().__init__(); it appears to be used as a plain helper object
    # rather than a real Tk root window — confirm before refactoring.

    def __init__(self, myMainGUI, color, create_button, levelId):
        """
        Initialize parameters
        :param myMainGUI: The instance of the DanceAnno_MainGUI
        :param color: Color to give to the segmentation lines
        :param create_button: Mouse or Keyboard button that generates the segmentation line
        :param levelId: 'A' for first levelId and 'B' for second levelId
        :return:
        """
        self.myMainGUI = myMainGUI
        self.color = color
        self.create_button = create_button
        self.levelId = levelId

    def plot(self, annotationSecs, labels, canvas_SG, Fs, root, length_signal_samples):
        """
        Plot segmentation lines from previous annotation (annotationSecs, labels).
        The changes that will be made are taken from the canvas elements.
        :param annotationSecs: annotation timestamps
        :param labels: labels per annotation timestamp
        :param canvas_SG: list of canvases
        :param Fs: Sampling rate
        :param root: root of the window
        :param length_signal_samples: Total length of signal
        :return:
        """
        self.annotationSecs = annotationSecs
        # labels are only when annotation is already loaded. Otherwise labels are taken from tags
        self.labels = labels
        self.iStep = 0
        self.canvas_SG = canvas_SG
        self.Fs = Fs
        self.root = root
        self.length_signal_samples = length_signal_samples

        # this data is used to keep track of an item being dragged
        self._drag_data = {"x": 0, "item": None}

        # The width of the canvas
        cavnw = self.canvas_SG[0].winfo_width()

        # create the segmentation lines
        if (annotationSecs is not None):
            for i in range(len(annotationSecs)):
                # Map a timestamp (seconds) to an x pixel position:
                # fraction of the signal covered, scaled to the canvas width.
                xCanvas = int( float(annotationSecs[i] * Fs)/self.length_signal_samples * cavnw)
                self._create_token( xCanvas, str(i), self.labels[i])

            # the last step index where user can add steps
            self.iStep = len(annotationSecs)

        # Add bindings to canvas for clicking, dragging and releasing over any object with the "anntoken" tag
        for i in range(len(self.canvas_SG)):
            self.canvas_SG[i].tag_bind("anntoken", "<ButtonPress-1>", self.OnTokenButtonPress)
            self.canvas_SG[i].tag_bind("anntoken", "<ButtonRelease-1>", self.OnTokenButtonRelease)
            self.canvas_SG[i].tag_bind("anntoken", "<B1-Motion>", self.OnTokenMotion)
            self.canvas_SG[i].tag_bind("anntoken", "<Enter>", self.OnTokenEnter)
            self.canvas_SG[i].tag_bind("anntoken", "<Leave>", self.OnTokenLeave)
            # 'd' deletes the segmentation line closest to the mouse cursor.
            self.canvas_SG[i].bind("d", self.OnTokenDelete)
            self.canvas_SG[i].bind(self.create_button, self.OnTokenButtonPressB)

    def _create_token(self, xCanvas, identificationCode, description):
        """
        Create segmentation line and text label
        :param xCanvas: x coordinate in the canvas to plot the line
        :param identificationCode: number indicating the index of segmentation line
        :param description: the annotation actually
        :return:
        """
        # Canvas height
        cavnh = self.canvas_SG[0].winfo_height()

        # Iterate over canvases to plot the line and the text
        for dim in range(len(self.canvas_SG)):
            # Line
            self.canvas_SG[dim].create_line(xCanvas, 0, xCanvas, cavnh, fill=self.color,
                            tags=('anntoken', identificationCode + '_line', 'anntoken_' + self.levelId, 'all_resize'))

            # Text
            self.canvas_SG[dim].create_text(xCanvas + 1, cavnh * 1 / 2, text = description,
                            font=("Arial", 6, "normal"), fill= self.color, anchor='w',
                            tags=('anntoken', identificationCode + '_text', 'anntoken_' + self.levelId, description, 'all_resize'))

    def OnTokenButtonPress(self, event):
        """
        On left mouse press over segmentation line make it selected
        :param event: detect x and y of the mouse
        :return:
        """
        x = self.canvas_SG[0].canvasx(event.x)
        y = self.canvas_SG[0].canvasy(event.y)

        # Find the closest segmentation line on the x and y of the mouse click
        self._drag_data = {"x": event.x, "item": self.canvas_SG[0].find_closest(x, y)[0]}

    def OnTokenButtonPressB(self, event):
        """
        Right mouse click generates a new segmentation line or renames the closest one if exists
        :param event: event used to find the x and y of the mouse click
        :return:
        """
        # Find the x and y in the canvas that was clicked
        x = self.canvas_SG[0].canvasx(event.x)
        y = self.canvas_SG[0].canvasy(event.y)

        # Find closest item
        item = self.canvas_SG[0].find_closest(x, y)[0]

        if self.canvas_SG[0].gettags(item)[0] != 'anntoken':
            # Create a new segmentation line
            self._create_token(x, str(self.iStep), 'Step ' + str(self.iStep + 1))
            # Update the last index
            self.iStep += 1
        else:
            # Rename the old one using a dialogue GUI
            DanceAnno_InputDialog.MyDialog(self.root, "Input", self.canvas_SG, item, self.levelId)

    def OnTokenButtonRelease(self, event):
        """
        End drag of an object. Reset the drag information.
        :param event: ignore
        :return:
        """
        self._drag_data = {"x": 0, "item": None}
        # Re-enable the main-GUI buttons shortly after the drag ends.
        self.myMainGUI.root.after(200, self.myMainGUI.bindButtons)

    def OnTokenDelete(self, event):
        """
        Delete a segmentation line closest to the mouse cursor by pressing 'd' key.
        The algorithms deletes the line and the text simultaneously.
        :param event: ignore
        :return:
        """
        x = self.canvas_SG[0].canvasx(self.myMainGUI.mouse_x)
        y = self.canvas_SG[0].canvasy(self.myMainGUI.mouse_y)

        # Find the item to delete
        itemd = self.canvas_SG[0].find_closest(x, y)[0]

        # Get item tags
        tagslist = self.canvas_SG[0].gettags(itemd)

        # Continue only if the item selected is an annotation object (text or line)
        if 'anntoken' in tagslist:
            # Find its levelId, i.e. either 'anntoken_A' or 'anntoken_B'
            levelId = tagslist[2]

            # Find all annotation texts and lines that match tagslist
            tag_item_text = [stext for stext in tagslist if "_text" in stext] + \
                            [sline for sline in tagslist if "_line" in sline]

            # Find the text tag
            tag_item_text = tag_item_text[0]

            # Isolated the id [id]_text
            pos_of_underscore = tag_item_text.rfind('_')
            id = tag_item_text[0:pos_of_underscore]

            # Iterate over all canvases
            for dim in range(len(self.canvas_SG)):
                # Iterate to match the current levelId (it contains both for A and B levels)
                itemtexts = self.canvas_SG[dim].find_withtag(id + '_text')
                itemlines = self.canvas_SG[dim].find_withtag(id + '_line')
                for itemtext in itemtexts:
                    if self.canvas_SG[dim].gettags(itemtext)[2] == levelId:
                        self.canvas_SG[dim].delete(itemtext)
                for itemline in itemlines:
                    if self.canvas_SG[dim].gettags(itemline)[2] == levelId:
                        self.canvas_SG[dim].delete(itemline)

    def OnTokenMotion(self, event):
        """
        Move Annotation Line
        :param event: Use to calculate the drag
        :return:
        """
        self.myMainGUI.unbindButtons()

        # if Playline is holded then ignore
        if self.myMainGUI.myPlayLine.isPlayLineHold:
            return

        # Calculate difference
        delta_x = event.x - self._drag_data["x"]

        # Get the item to drag
        item = self._drag_data["item"]

        # Get selected item tags
        tags = self.canvas_SG[0].gettags(item)

        # xxx_line or xxx_text
        idtag = tags[1]

        # anntoken_A or anntoken_B
        levelId = tags[2]

        # xxx from xxx_line or xxx_text
        id = idtag[0:idtag.rfind('_')]

        # move text or line (depends which is selected)
        for i in range(len(self.canvas_SG)):
            itemtexts = self.canvas_SG[i].find_withtag(id + '_text')  # it contains both for A and B levels
            itemlines = self.canvas_SG[i].find_withtag(id + '_line')
            for itemtext in itemtexts:
                if self.canvas_SG[0].gettags(itemtext)[2] == levelId:
                    self.canvas_SG[i].move(itemtext, delta_x, 0)
            for itemline in itemlines:
                if self.canvas_SG[0].gettags(itemline)[2] == levelId:
                    self.canvas_SG[i].move(itemline, delta_x, 0)

        # record the new position
        self._drag_data["x"] = event.x

    def OnTokenEnter(self, event):
        # Change cursor icon on hover above item
        self.root.config(cursor="crosshair")

    def OnTokenLeave(self, event):
        # Change cursor icon on hover out of item
        self.root.config(cursor="")
{ "content_hash": "cffb0976c650b945e3baa3b7cf24a617", "timestamp": "", "source": "github", "line_count": 255, "max_line_length": 150, "avg_line_length": 37.22352941176471, "alnum_prop": 0.5929203539823009, "repo_name": "MKLab-ITI/DanceAnno", "id": "fc0bb97f54b1045215a69db26311af932a363d14", "size": "9492", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "DanceAnno_AnnFunctions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "36" }, { "name": "Python", "bytes": "129402" } ], "symlink_target": "" }
import copy

from constants import ENTITY_TYPES, PATH_SEPARATOR


def pluralize(input):
    # Naive English pluralization: 'entity' -> 'entities', 'node' -> 'nodes'.
    if input[-1] == 'y':
        return '{0}ies'.format(input[:-1])
    else:
        return '{0}s'.format(input)


def extract_ids(node_instances, key='id'):
    # Collect the given attribute/key from each instance; instances may be
    # plain dicts or objects.  Returns [] for None/empty input.
    if node_instances:
        return [instance[key] if isinstance(instance, dict)
                else getattr(instance, key)
                for instance in node_instances]
    else:
        return []


class ModifiedEntitiesDict(object):
    # Accumulates the ids of entities touched by a deployment update,
    # bucketed by entity type.  __setitem__ appends rather than replaces.

    def __init__(self):
        self.modified_entity_ids = \
            {entity_type: [] for entity_type in ENTITY_TYPES}

    def __setitem__(self, entity_type, entity_id):
        # Note: appends to the type's list (does not overwrite).
        self.modified_entity_ids[entity_type].append(entity_id)

    def __getitem__(self, entity_type):
        return self.modified_entity_ids[entity_type]

    def __iter__(self):
        return iter(self.modified_entity_ids)

    def to_dict(self, include_rel_order=False):
        """
        Relationship entity ids support both order manipulation and
        adding/removing relationships. Thus, t_id could be both target id
        (for add/remove relationships), and (source_index, target_index)
        for order manipulation.

        In order to get the order you should pass the include_rel_order flag,
        and the dict returned will hold these changes under rel_order_key.
        :param include_rel_order: whether to extract the changes.
        :return: dict of modified entity ids.
        """
        relationships = {}
        rel_order = {}
        for s_id, t_id in self.modified_entity_ids[ENTITY_TYPES.RELATIONSHIP]:
            # Tuples encode (source_index, target_index) order changes;
            # scalars are plain target ids for added/removed relationships.
            if isinstance(t_id, tuple):
                if include_rel_order:
                    if s_id in rel_order:
                        rel_order[s_id].append(t_id)
                    else:
                        rel_order[s_id] = [t_id]
            else:
                if s_id in relationships:
                    relationships[s_id].append(t_id)
                else:
                    relationships[s_id] = [t_id]

        # Deep-copy so callers cannot mutate the internal accumulator.
        modified_entities_to_return = copy.deepcopy(self.modified_entity_ids)
        modified_entities_to_return[ENTITY_TYPES.RELATIONSHIP] = \
            relationships
        if include_rel_order:
            modified_entities_to_return['rel_mappings'] = rel_order

        return modified_entities_to_return


def traverse_object(obj, breadcrumbs):
    """ Traverses an object constructed out of dicts and lists.

    :param obj: the object to traverse
    :param breadcrumbs: the breadcrumbs on which to traverse, while list
    indices surrounded by [x]
    :return: the object at the end of the breadcrumbs
    """
    # Returns None implicitly when a breadcrumb does not match (missing
    # dict key, out-of-range list index, or non-container object).
    if not breadcrumbs:
        return obj
    current_key = breadcrumbs[0]
    if isinstance(obj, dict):
        if current_key in obj:
            return traverse_object(obj[breadcrumbs[0]], breadcrumbs[1:])
    elif isinstance(obj, list):
        index = parse_index(current_key)
        # NOTE(review): `len(obj) >= index` permits index == len(obj), which
        # would raise IndexError; `>` looks intended — confirm with callers.
        if index is not None and len(obj) >= index:
            return traverse_object(obj[index], breadcrumbs[1:])
    else:
        return None


def create_dict(breadcrumbs, value=None):
    """ Created a dict out of the breadcrumbs in a recursive manner.
    each entry in the breadcrumb should be a valid dictionary key. If value
    is None, the last string within' the breadcrumbs becomes the final value.

    :param breadcrumbs:
    :param value:
    :return:
    """
    # NOTE(review): the recursion passes `value` through but the len==1
    # branch returns breadcrumbs[0] (not value), and with value=None an
    # empty breadcrumbs list reaches breadcrumbs[0] and raises IndexError.
    # Callers appear to rely on the "last breadcrumb becomes the value"
    # behaviour — verify before changing.
    if value is not None:
        if not breadcrumbs:
            return value
        elif len(breadcrumbs) == 1:
            return breadcrumbs[0]

    return {breadcrumbs[0]: create_dict(breadcrumbs[1:], value)}


def get_entity_keys(entity_id):
    # Split a path-style entity id into its component keys.
    return entity_id.split(PATH_SEPARATOR)


def get_raw_node(blueprint, node_id):
    # Return the raw node dict with the given id, or {} when absent.
    nodes = [n for n in blueprint.get('nodes', []) if n['id'] == node_id]
    return nodes[0] if nodes else {}


def check_is_int(s):
    # True when s is convertible to int (string digits or a number).
    try:
        int(s)
    except ValueError:
        return False
    return True


def parse_int(s):
    # int(s) when convertible, otherwise None.
    if check_is_int(s):
        return int(s)
    else:
        return None


def parse_index(s):
    # Parse a breadcrumb of the form '[n]' into n, or None when malformed.
    return parse_int(s[1:-1])


def index_to_str(index):
    # Format an index as '[n]'; returns None implicitly for non-ints.
    if check_is_int(index):
        return '[{0}]'.format(index)
{ "content_hash": "2520afe5683ac0d4f57a25b9ff4482c7", "timestamp": "", "source": "github", "line_count": 148, "max_line_length": 78, "avg_line_length": 28.574324324324323, "alnum_prop": 0.6048711279262237, "repo_name": "isaac-s/cloudify-manager", "id": "7fae420a7cba76d2885c34c34166f762b9e5b729", "size": "4229", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rest-service/manager_rest/deployment_update/utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Clojure", "bytes": "4067" }, { "name": "Mako", "bytes": "541" }, { "name": "Python", "bytes": "1793118" }, { "name": "Ruby", "bytes": "40193" }, { "name": "Shell", "bytes": "41526" } ], "symlink_target": "" }
# Generator script (Python 2): reads load_functions_data.json and emits
# load_functions_table_autogen.cpp, which maps (GL internal format,
# ANGLE format id, GL type) to an image-loading function for ANGLE.
import json, sys
from datetime import date

sys.path.append('../..')

import angle_format

# C++ file skeleton; literal braces are doubled because str.format is used.
template = """// GENERATED FILE - DO NOT EDIT.
// Generated by gen_load_functions_table.py using data from load_functions_data.json
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// load_functions_table:
//   Contains the GetLoadFunctionsMap for texture_format_util.h
//

#include "libANGLE/renderer/load_functions_table.h"

#include "image_util/copyimage.h"
#include "image_util/generatemip.h"
#include "image_util/loadimage.h"

using namespace rx;

namespace angle
{{

namespace
{{

// ES3 image loading functions vary based on:
//    - the GL internal format (supplied to glTex*Image*D)
//    - the GL data type given (supplied to glTex*Image*D)
//    - the target DXGI_FORMAT that the image will be loaded into (which is chosen based on the D3D
//      device's capabilities)
// This map type determines which loading function to use, based on these three parameters.
// Source formats and types are taken from Tables 3.2 and 3.3 of the ES 3 spec.
void UnimplementedLoadFunction(size_t width,
                               size_t height,
                               size_t depth,
                               const uint8_t *input,
                               size_t inputRowPitch,
                               size_t inputDepthPitch,
                               uint8_t *output,
                               size_t outputRowPitch,
                               size_t outputDepthPitch)
{{
    UNIMPLEMENTED();
}}

void UnreachableLoadFunction(size_t width,
                             size_t height,
                             size_t depth,
                             const uint8_t *input,
                             size_t inputRowPitch,
                             size_t inputDepthPitch,
                             uint8_t *output,
                             size_t outputRowPitch,
                             size_t outputDepthPitch)
{{
    UNREACHABLE();
}}

{load_functions_data}}}  // namespace

LoadFunctionMap GetLoadFunctionsMap(GLenum {internal_format}, Format::ID {angle_format})
{{
    // clang-format off
    switch ({internal_format})
    {{
{switch_data}        default:
        {{
            static LoadFunctionMap emptyLoadFunctionsMap;
            return emptyLoadFunctionsMap;
        }}
    }}
    // clang-format on
}}  // GetLoadFunctionsMap

}}  // namespace angle
"""

# Names used inside the generated C++ and as str.format keys.
internal_format_param = 'internalFormat'
angle_format_param = 'angleFormat'
angle_format_unknown = 'NONE'

def load_functions_name(internal_format, angle_format):
    # 'GL_RGBA8' -> 'RGBA8_to_<angle_format>' (strips the 'GL_' prefix).
    return internal_format[3:] + "_to_" + angle_format

def unknown_func_name(internal_format):
    # Fallback loader name used when no specific ANGLE format matches.
    return load_functions_name(internal_format, "default")

def get_load_func(func_name, type_functions):
    # Emit one C++ dispatch function that switches on the GL type and
    # returns the matching LoadImageFunctionInfo.
    snippet = "LoadImageFunctionInfo " + func_name + "(GLenum type)\n"
    snippet += "{\n"
    snippet += "    switch (type)\n"
    snippet += "    {\n"
    for gl_type, load_function in sorted(type_functions.iteritems()):
        snippet += "        case " + gl_type + ":\n"
        # LoadToNative<> loaders copy straight through, so no conversion.
        requiresConversion = str('LoadToNative<' not in load_function).lower()
        snippet += "            return LoadImageFunctionInfo(" + load_function + ", " + requiresConversion + ");\n"
    snippet += "        default:\n"
    snippet += "            UNREACHABLE();\n"
    snippet += "            return LoadImageFunctionInfo(UnreachableLoadFunction, true);\n"
    snippet += "    }\n"
    snippet += "}\n"
    snippet += "\n"
    return snippet

def get_unknown_load_func(angle_to_type_map, internal_format):
    # Emit the fallback dispatch function from the 'NONE' entry.
    assert angle_format_unknown in angle_to_type_map
    return get_load_func(unknown_func_name(internal_format),
                         angle_to_type_map[angle_format_unknown])

def parse_json(json_data):
    # Walk the JSON map {internal format -> {angle format -> {type -> fn}}}
    # and build (a) the switch table body and (b) the dispatch functions.
    table_data = ''
    load_functions_data = ''
    for internal_format, angle_to_type_map in sorted(json_data.iteritems()):
        # `s` tracks the current indentation of the emitted C++.
        s = '        '

        table_data += s + 'case ' + internal_format + ':\n'

        # Only emit an inner switch when the format maps to more than the
        # 'NONE' fallback entry.
        do_switch = len(angle_to_type_map) > 1 or angle_to_type_map.keys()[0] != angle_format_unknown

        if do_switch:
            table_data += s + '{\n'
            s += '    '
            table_data += s + 'switch (' + angle_format_param + ')\n'
            table_data += s + '{\n'
            s += '    '

        for angle_format, type_functions in sorted(angle_to_type_map.iteritems()):
            if angle_format == angle_format_unknown:
                continue

            func_name = load_functions_name(internal_format, angle_format)

            # Main case statements
            table_data += s + 'case Format::ID::' + angle_format + ':\n'
            table_data += s + '    return ' + func_name + ';\n'

            # Merge in the fallback type handlers the specific format lacks.
            if angle_format_unknown in angle_to_type_map:
                for gl_type, load_function in angle_to_type_map[angle_format_unknown].iteritems():
                    if gl_type not in type_functions:
                        type_functions[gl_type] = load_function

            load_functions_data += get_load_func(func_name, type_functions)

        if do_switch:
            table_data += s + 'default:\n'
            if angle_format_unknown in angle_to_type_map:
                table_data += s + '    return ' + unknown_func_name(internal_format) + ';\n'
                load_functions_data += get_unknown_load_func(angle_to_type_map, internal_format)
            else:
                table_data += s + '    break;\n'

        if do_switch:
            s = s[4:]
            table_data += s + '}\n'
            s = s[4:]
            table_data += s + '}\n'

    return table_data, load_functions_data

json_data = angle_format.load_json('load_functions_data.json')

switch_data, load_functions_data = parse_json(json_data)

output = template.format(internal_format = internal_format_param,
                         angle_format = angle_format_param,
                         switch_data = switch_data,
                         load_functions_data = load_functions_data,
                         copyright_year = date.today().year)

with open('load_functions_table_autogen.cpp', 'wt') as out_file:
    out_file.write(output)
    # NOTE(review): close() inside the with-block is redundant — the
    # context manager already closes the file on exit.
    out_file.close()
{ "content_hash": "df86c6c23b51446d824502ffd0ac3b00", "timestamp": "", "source": "github", "line_count": 182, "max_line_length": 115, "avg_line_length": 34.527472527472526, "alnum_prop": 0.5752705283259071, "repo_name": "ecoal95/angle", "id": "eb3e4c5bf08212d427d6ab5031d7574ec4664930", "size": "6578", "binary": false, "copies": "3", "ref": "refs/heads/servo", "path": "src/libANGLE/renderer/gen_load_functions_table.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "10208" }, { "name": "C", "bytes": "561165" }, { "name": "C++", "bytes": "7454663" }, { "name": "Lex", "bytes": "26372" }, { "name": "Objective-C", "bytes": "18506" }, { "name": "Objective-C++", "bytes": "25537" }, { "name": "PostScript", "bytes": "989" }, { "name": "Python", "bytes": "63180" }, { "name": "Rust", "bytes": "11000" }, { "name": "Shell", "bytes": "1461" }, { "name": "Yacc", "bytes": "61968" } ], "symlink_target": "" }
"""Tests for GZip files.""" # Note: do not rename file to gzip.py this can cause the exception: # AttributeError: 'module' object has no attribute 'GzipFile' # when using pip. import unittest from dtformats import gzipfile from tests import test_lib class GZipFileTest(test_lib.BaseTestCase): """GZip file tests.""" # pylint: disable=protected-access # TODO: test _ReadCompressedData function # TODO: test _ReadMemberCompressedData function # TODO: test _ReadMemberFooter function def testReadMemberHeader(self): """Tests the _ReadMemberHeader function.""" output_writer = test_lib.TestOutputWriter() test_file = gzipfile.GZipFile(output_writer=output_writer) test_file_path = self._GetTestFilePath(['syslog.gz']) self._SkipIfPathNotExists(test_file_path) with open(test_file_path, 'rb') as file_object: test_file._ReadMemberHeader(file_object) def testReadFileObject(self): """Tests the ReadFileObject.""" output_writer = test_lib.TestOutputWriter() # TODO: add debug=True test_file = gzipfile.GZipFile(output_writer=output_writer) test_file_path = self._GetTestFilePath(['syslog.gz']) self._SkipIfPathNotExists(test_file_path) test_file.Open(test_file_path) if __name__ == '__main__': unittest.main()
{ "content_hash": "0f664ce728f721eed77e6d6638d353a1", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 67, "avg_line_length": 27.51063829787234, "alnum_prop": 0.7146171693735499, "repo_name": "libyal/dtformats", "id": "0879c20a091a655a5cbdc7bff43ac67a281b6357", "size": "1317", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/gzipfile.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "122" }, { "name": "PowerShell", "bytes": "827" }, { "name": "Python", "bytes": "700241" }, { "name": "Shell", "bytes": "1139" } ], "symlink_target": "" }
import os
import re
import ujson

from django.conf import settings
from django.utils.translation import ugettext as _
from typing import Optional, Tuple

from zerver.lib.request import JsonableError
from zerver.lib.upload import upload_backend
from zerver.models import Reaction, Realm, RealmEmoji, UserProfile

# Generated emoji metadata shipped with the static assets.
EMOJI_PATH = os.path.join(settings.STATIC_ROOT, "generated", "emoji")
NAME_TO_CODEPOINT_PATH = os.path.join(EMOJI_PATH, "name_to_codepoint.json")
CODEPOINT_TO_NAME_PATH = os.path.join(EMOJI_PATH, "codepoint_to_name.json")
EMOTICON_CONVERSIONS_PATH = os.path.join(EMOJI_PATH, "emoticon_conversions.json")

# Loaded once at import time: emoji name -> unicode codepoint string.
with open(NAME_TO_CODEPOINT_PATH) as fp:
    name_to_codepoint = ujson.load(fp)

# Inverse table: unicode codepoint string -> canonical emoji name.
with open(CODEPOINT_TO_NAME_PATH) as fp:
    codepoint_to_name = ujson.load(fp)

# Emoticon text (e.g. ":)") -> colon-syntax emoji name (e.g. ":smiley:").
with open(EMOTICON_CONVERSIONS_PATH) as fp:
    EMOTICON_CONVERSIONS = ujson.load(fp)

possible_emoticons = EMOTICON_CONVERSIONS.keys()
possible_emoticon_regexes = map(re.escape, possible_emoticons)  # type: ignore # AnyStr/str issues
# Characters that may legally border an emoticon; must stay in sync with the
# frontend (see comment below).
terminal_symbols = ',.;?!()\\[\\] "\'\\n\\t'  # type: str # from composebox_typeahead.js
# Built once at import: matches any known emoticon only when surrounded by
# terminal symbols (or string boundaries), via lookbehind/lookahead.
emoticon_regex = ('(?<![^{0}])(?P<emoticon>('.format(terminal_symbols)
                  + ')|('.join(possible_emoticon_regexes)  # type: ignore # AnyStr/str issues
                  + '))(?![^{0}])'.format(terminal_symbols))

# Translates emoticons to their colon syntax, e.g. `:smiley:`.
def translate_emoticons(text: str) -> str:
    """Replace every known emoticon (e.g. ":)") in `text` with its
    colon-syntax emoji name (e.g. ":smiley:"), per EMOTICON_CONVERSIONS."""
    translated = text

    for emoticon in EMOTICON_CONVERSIONS:
        translated = re.sub(
            re.escape(emoticon), EMOTICON_CONVERSIONS[emoticon], translated)

    return translated

def emoji_name_to_emoji_code(realm: Realm, emoji_name: str) -> Tuple[str, str]:
    """Resolve an emoji name to an (emoji_code, reaction_type) pair.

    Resolution order: active realm (custom) emoji first, then the special
    "zulip" extra emoji, then standard unicode emoji by name.
    Raises JsonableError if the name cannot be resolved.
    """
    realm_emojis = realm.get_active_emoji()
    realm_emoji = realm_emojis.get(emoji_name)
    if realm_emoji is not None:
        # Custom emoji are identified by their database id (as a string).
        return str(realm_emojis[emoji_name]['id']), Reaction.REALM_EMOJI
    if emoji_name == 'zulip':
        return emoji_name, Reaction.ZULIP_EXTRA_EMOJI
    if emoji_name in name_to_codepoint:
        return name_to_codepoint[emoji_name], Reaction.UNICODE_EMOJI
    raise JsonableError(_("Emoji '%s' does not exist") % (emoji_name,))

def check_valid_emoji(realm: Realm, emoji_name: str) -> None:
    """Raise JsonableError (via emoji_name_to_emoji_code) unless `emoji_name`
    resolves for this realm; the resolved code is intentionally discarded."""
    emoji_name_to_emoji_code(realm, emoji_name)

def check_emoji_request(realm: Realm, emoji_name: str, emoji_code: str,
                        emoji_type: str) -> None:
    # For a given realm and emoji type, checks whether an emoji
    # code is valid for new reactions, or not.
    if emoji_type == "realm_emoji":
        # Note: deactivated custom emoji are still looked up here (get_emoji,
        # not get_active_emoji) so we can give a precise error message.
        realm_emojis = realm.get_emoji()
        realm_emoji = realm_emojis.get(emoji_code)
        if realm_emoji is None:
            raise JsonableError(_("Invalid custom emoji."))
        if realm_emoji["name"] != emoji_name:
            raise JsonableError(_("Invalid custom emoji name."))
        if realm_emoji["deactivated"]:
            raise JsonableError(_("This custom emoji has been deactivated."))
    elif emoji_type == "zulip_extra_emoji":
        # The only extra emoji is "zulip"; its name and code must agree.
        if emoji_code not in ["zulip"]:
            raise JsonableError(_("Invalid emoji code."))
        if emoji_name != emoji_code:
            raise JsonableError(_("Invalid emoji name."))
    elif emoji_type == "unicode_emoji":
        # Code must be a known codepoint and must match the given name.
        if emoji_code not in codepoint_to_name:
            raise JsonableError(_("Invalid emoji code."))
        if name_to_codepoint.get(emoji_name) != emoji_code:
            raise JsonableError(_("Invalid emoji name."))
    else:
        # The above are the only valid emoji types
        raise JsonableError(_("Invalid emoji type."))

def check_emoji_admin(user_profile: UserProfile, emoji_name: Optional[str]=None) -> None:
    """Raises an exception if the user cannot administer the target realm
    emoji name in their organization."""

    # Realm administrators can always administer emoji
    if user_profile.is_realm_admin:
        return
    if user_profile.realm.add_emoji_by_admins_only:
        raise JsonableError(_("Must be an organization administrator"))

    # Otherwise, normal users can add emoji
    if emoji_name is None:
        return

    # Additionally, normal users can remove emoji they themselves added
    emoji = RealmEmoji.objects.filter(realm=user_profile.realm,
                                      name=emoji_name,
                                      deactivated=False).first()
    current_user_is_author = (emoji is not None and
                              emoji.author is not None and
                              emoji.author.id == user_profile.id)
    # NOTE(review): `not user_profile.is_realm_admin` is always True here
    # (admins returned early above), so authorship alone decides this branch.
    if not user_profile.is_realm_admin and not current_user_is_author:
        raise JsonableError(_("Must be an organization administrator or emoji author"))

def check_valid_emoji_name(emoji_name: str) -> None:
    # Lowercase alphanumerics plus ".", "-", "_", and the name may not end
    # with one of those punctuation characters (negative lookbehind).
    if re.match(r'^[0-9a-z.\-_]+(?<![.\-_])$', emoji_name):
        return
    raise JsonableError(_("Invalid characters in emoji name"))

def get_emoji_url(emoji_file_name: str, realm_id: int) -> str:
    """Return the serving URL for an uploaded custom emoji file."""
    return upload_backend.get_emoji_url(emoji_file_name, realm_id)


def get_emoji_file_name(emoji_file_name: str, emoji_id: int) -> str:
    """Return the storage file name for a custom emoji: its database id
    plus the original file extension."""
    _, image_ext = os.path.splitext(emoji_file_name)
    return ''.join((str(emoji_id), image_ext))
{ "content_hash": "d197b765537b877f4e431554c951eb71", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 98, "avg_line_length": 43.52100840336134, "alnum_prop": 0.6595867928171462, "repo_name": "rishig/zulip", "id": "80d6e41c67db63e6a02f8ce131f731b9b9fec480", "size": "5180", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "zerver/lib/emoji.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "394414" }, { "name": "Dockerfile", "bytes": "2939" }, { "name": "Emacs Lisp", "bytes": "158" }, { "name": "HTML", "bytes": "721392" }, { "name": "JavaScript", "bytes": "3050898" }, { "name": "Perl", "bytes": "398763" }, { "name": "Puppet", "bytes": "71261" }, { "name": "Python", "bytes": "6870363" }, { "name": "Ruby", "bytes": "6110" }, { "name": "Shell", "bytes": "119762" }, { "name": "TypeScript", "bytes": "14100" } ], "symlink_target": "" }
from unittest import TestCase
import json

from emojipy.ruleset import (ascii_replace, shortcode_replace,
                             unicode_replace)

json_path = 'emojipy/emoji.json'


class MappingTests(TestCase):
    """Check that the generated ruleset tables cover emoji.json exactly."""

    def setUp(self):
        with open(json_path) as json_file:
            content = json_file.read()
        self.json_dict = json.loads(content)
        self.emoji_count = len(self.json_dict)
        # Collect every ASCII alias declared across all emoji entries.
        self.ascii_list = [
            alias
            for value in self.json_dict.values()
            for alias in value['aliases_ascii']
        ]

    def test_unicode_count(self):
        # One unicode rule per emoji, one ascii rule per declared alias,
        # and shortcode rules mirror the unicode table one-to-one.
        self.assertEqual(self.emoji_count, len(unicode_replace))
        self.assertEqual(len(ascii_replace), len(self.ascii_list))
        self.assertEqual(len(shortcode_replace), len(unicode_replace))
{ "content_hash": "94323696bf21acb6c2f430edadff8ffc", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 70, "avg_line_length": 33.52173913043478, "alnum_prop": 0.6614785992217899, "repo_name": "launchyard/emojipy", "id": "ccc42d5eae94014d65564498904dafce1264b604", "size": "820", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "emojipy/tests/test_ruleset.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "248425" }, { "name": "HTML", "bytes": "595" }, { "name": "Python", "bytes": "151290" } ], "symlink_target": "" }
""" Production Configurations - Use Redis for cache """ from __future__ import absolute_import, unicode_literals from .common import * # noqa # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ SECRET_KEY = env('DJANGO_SECRET_KEY') # SECURITY CONFIGURATION # ------------------------------------------------------------------------------ # See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security # and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy # set this to 60 seconds and then to 518400 when you can prove it works SECURE_HSTS_SECONDS = 60 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool( 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True) SECURE_CONTENT_TYPE_NOSNIFF = env.bool( 'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True) SECURE_BROWSER_XSS_FILTER = True SESSION_COOKIE_SECURE = True SESSION_COOKIE_HTTPONLY = True SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True X_FRAME_OPTIONS = 'DENY' # SITE CONFIGURATION # ------------------------------------------------------------------------------ # Hosts/domain names that are valid for this site # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['{{cookiecutter.domain_name}}']) # END SITE CONFIGURATION INSTALLED_APPS += ['gunicorn'] # EMAIL # ------------------------------------------------------------------------------ DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>') EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[{{cookiecutter.project_name}}] ') SERVER_EMAIL = 
env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL) # TEMPLATE CONFIGURATION # ------------------------------------------------------------------------------ # See: # https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader TEMPLATES[0]['OPTIONS']['loaders'] = [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader']), ] # DATABASE CONFIGURATION # ------------------------------------------------------------------------------ # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ DATABASES['default'] = env.db('DATABASE_URL') # CACHING # ------------------------------------------------------------------------------ REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0) CACHES = { 'default': { 'BACKEND': 'django_redis.cache.RedisCache', 'LOCATION': REDIS_LOCATION, 'OPTIONS': { 'CLIENT_CLASS': 'django_redis.client.DefaultClient', 'IGNORE_EXCEPTIONS': True, # mimics memcache behavior. # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior } } } # LOGGING CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s ' '%(process)d %(thread)d %(message)s' }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'verbose', }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True }, 'django.security.DisallowedHost': { 'level': 'ERROR', 'handlers': ['console', 'mail_admins'], 'propagate': True } } } # Custom Admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %} ADMIN_URL = env('DJANGO_ADMIN_URL') # Your production stuff: Below this line define 3rd party library settings # ------------------------------------------------------------------------------
{ "content_hash": "f175bf9bd79122127c280ea7545d96b8", "timestamp": "", "source": "github", "line_count": 132, "max_line_length": 117, "avg_line_length": 38.43939393939394, "alnum_prop": 0.5577453685455263, "repo_name": "valerymelou/cookiecutter-django-gulp", "id": "b9a2bc4b67ae41447849e91eda569f874a081f90", "size": "5098", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "{{cookiecutter.project_slug}}/config/settings/production.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "5146" }, { "name": "HTML", "bytes": "2678" }, { "name": "JavaScript", "bytes": "3104" }, { "name": "Makefile", "bytes": "5664" }, { "name": "Python", "bytes": "37904" } ], "symlink_target": "" }
import urllib2
import urllib

from django import http
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response
from django.template import RequestContext

try:
    from allauth.account.views import signup as allauth_signup
    from allauth.account.forms import LoginForm
    from allauth.account.utils import get_default_redirect
    ALLAUTH = True
except ImportError:
    ALLAUTH = False


def form(request):
    """ Ajax handler for Google Form submition """
    if request.method != 'POST':
        body = _('Error: request type has to be POST')
    else:
        url = request.POST['url']
        # Append the UI language, respecting an existing query string.
        separator = '&' if '?' in url else '?'
        submit_url = '%s%shl=%s' % (url, separator, request.LANGUAGE_CODE)
        payload = urllib.urlencode(request.POST)
        body = urllib2.urlopen(submit_url, payload).read()
    return http.HttpResponse(body, mimetype="text/plain")


def signup(request, **kwargs):
    """ Overrides allauth.account.views.signup """
    if not ALLAUTH:
        return http.HttpResponse(_('allauth not installed...'))

    # A POST carrying a 'login' field is a login attempt submitted through
    # the combined signup page; try to authenticate before falling back.
    if request.method == "POST" and 'login' in request.POST:
        login_form = LoginForm(request.POST)
        success_url = get_default_redirect(request, "next")
        if login_form.is_valid():
            return login_form.login(request, redirect_url=success_url)

    return allauth_signup(request, **kwargs)
{ "content_hash": "ab3a199794298c46025a3a11588b108b", "timestamp": "", "source": "github", "line_count": 52, "max_line_length": 72, "avg_line_length": 30, "alnum_prop": 0.6455128205128206, "repo_name": "sternoru/goscalecms", "id": "629ed3c42bc069e1b46424404f0cf36402210e54", "size": "1560", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "goscale/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "28440" }, { "name": "JavaScript", "bytes": "78761" }, { "name": "Python", "bytes": "348404" }, { "name": "Shell", "bytes": "5096" } ], "symlink_target": "" }
# Auto-generated Django migration.
#
# Adds explicit related_name values (with on_delete=CASCADE) to the
# AUTH_USER_MODEL foreign keys on ItemChangedTimelineItem.user and
# ProjectInvitation.invited_by.
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        # Depends on the swappable user model and the previous editor migration.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('editor', '0055_customparttypeaccess'),
    ]

    operations = [
        migrations.AlterField(
            model_name='itemchangedtimelineitem',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_changed_timelineitems', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='projectinvitation',
            name='invited_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_invitations', to=settings.AUTH_USER_MODEL),
        ),
    ]
{ "content_hash": "fa4419152621eef4818a6bc49d65d82c", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 153, "avg_line_length": 35.541666666666664, "alnum_prop": 0.6682297772567409, "repo_name": "numbas/editor", "id": "3b4d946088aee0afcfbabcf00ccaf678e7054ffb", "size": "902", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "editor/migrations/0056_auto_20211109_0858.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "44056" }, { "name": "HTML", "bytes": "548468" }, { "name": "JavaScript", "bytes": "2344000" }, { "name": "Less", "bytes": "205670" }, { "name": "Makefile", "bytes": "10028" }, { "name": "Python", "bytes": "551931" } ], "symlink_target": "" }
""" Python Interchangeable Virtual Instrument Driver Copyright (c) 2013-2014 Alex Forencich Modified by Coburn Wightman 2017 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from .agilentBase5334 import * class agilent5334B(agilentBase5334): "HP / Agilent 5334B universal counter driver" def __init__(self, *args, **kwargs): self.__dict__.setdefault('_instrument_id', 'HP5334B') super(agilent5334B, self).__init__(*args, **kwargs) #self._input_impedance = 50 #self._frequency_low = 10e3 #self._frequency_high = 1.5e9
{ "content_hash": "4a2ea2501665c611e67f6f53666b0e69", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 77, "avg_line_length": 35.34090909090909, "alnum_prop": 0.7562700964630225, "repo_name": "coburnw/hp5334-ivi", "id": "2b16d84a36ffa57eb4ffbfda082a268d2f202015", "size": "1555", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "local/agilent5334B.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "26422" } ], "symlink_target": "" }
"""Perceiver AR Architecture implementation. As described in: "General-purpose, long-context autoregressive modeling with Perceiver AR" https://arxiv.org/abs/2202.07765 """ import dataclasses from typing import List, Optional, Tuple from flax import linen as nn import jax.numpy as jnp from flaxformer.architectures.perceiver_ar import attention from flaxformer.architectures.perceiver_ar import slicing from flaxformer.architectures.t5 import t5_architecture from flaxformer.types import Array @dataclasses.dataclass(frozen=True) class PerceiverARTransparentLayerSequence: """Perceiver AR version of TransparentLayerSequence that manages slicing. The decoder_mask is different for the first layer vs. the remaining layers. Similar for the logit mask and prefill lengths. It's better to do the change outside of the scan-over-layers so that it is done only once. Attributes: layers: List of nn.Modules, which should be owned by a parent Flax module. num_latents: Number of latents and outputs. """ layers: List[nn.Module] num_latents: int def __call__(self, inputs: Array, encoded, decoder_mask=None, encoder_decoder_mask=None, *, logit_mask=None, enable_dropout: bool = True, decode: bool = False, max_decode_length: Optional[int] = None, prefill: bool = False, prefill_lengths: Optional[Array] = None, num_latents: Optional[int] = None, sequence_lengths: Optional[Array] = None) -> Array: """Applies all Transformer layers to the inputs sequentially. Args: inputs: Input data for decoder with shape [batch_size, decoder_seq_length, decoder_hidden_size]. encoded: required to be None, block is Decoder only, only kept for __call__ signature uniformity. decoder_mask: decoder self-attention mask. encoder_decoder_mask: required to be None, block is Decoder only, only kept for __call__ signature uniformity. logit_mask: a mask (e.g., padding logit mask) to be applied to the attention logits. enable_dropout: Enables dropout if set to True. 
decode: Whether to prepare and use an autoregressive cache. max_decode_length: An optional integer specifying the maximum decoding length. Note that this is only used for defining the relative position embedding parameters. prefill: Whether to run a partial sequence to prefill the cache. prefill_lengths: The length of each partial sequence we are filling in the cache, lengths are inferred from the mask if not provided. num_latents: Used to override the number of output Perceiver AR latents during decoding. sequence_lengths: Lengths of all target sequences. Required for Perceiver AR operation. Returns: The encoded inputs <float>[..., seq_len, hidden_size]. """ if num_latents and num_latents > self.num_latents: raise ValueError( f'Overridden num_latents ({num_latents}) must be <= self.num_latents ' f'({self.num_latents}).') num_latents = num_latents or self.num_latents current_activations = inputs for i, layer in enumerate(self.layers): layer_decoder_mask = decoder_mask if (layer_decoder_mask is not None and layer_decoder_mask.shape[-1] != current_activations.shape[-2]): assert i > 0 # If we're in the self-attention stack, then kv should also be sliced. # From: [batch, 1, num_latents, input_length] # To: [batch, 1, num_latents, num_latents] assert layer_decoder_mask.shape[-1] >= current_activations.shape[-2] layer_decoder_mask = slicing.slice_sequences_vmap( layer_decoder_mask, sequence_lengths, num_latents, axis_within_vmap=-1) layer_prefill_lengths = prefill_lengths if prefill: if layer_prefill_lengths is None: layer_prefill_lengths = sequence_lengths # Ensure prefill_lengths isn't longer than the input length. # For Perceiver AR, this can happen in the self-attention stack, which # is narrower than the actual sequence length. 
layer_prefill_lengths = jnp.minimum(current_activations.shape[-2], layer_prefill_lengths) layer_logit_mask = logit_mask if (layer_logit_mask is not None and layer_logit_mask.shape[-2] != current_activations.shape[-2]): assert layer_logit_mask.shape[-2] >= current_activations.shape[-2] layer_logit_mask = slicing.slice_sequences_vmap( layer_logit_mask, sequence_lengths, current_activations.shape[-2], axis_within_vmap=0) current_activations = layer( current_activations, encoded, layer_decoder_mask, encoder_decoder_mask, logit_mask=layer_logit_mask, enable_dropout=enable_dropout, decode=decode, max_decode_length=max_decode_length, prefill=prefill, prefill_lengths=layer_prefill_lengths, num_latents=num_latents, sequence_lengths=sequence_lengths) return current_activations class Decoder(t5_architecture.Decoder): """Perceiver AR Decoder. Attributes: num_latents: Number of latents for queries and number of output latents. """ # num_latents is actually required, but has to be marked as optional because # we don't yet require Python 3.10, which provides keyword-only dataclasses. num_latents: Optional[int] = None def setup(self): if self.num_latents is None: raise ValueError('num_latents must be specified.') super().setup() def _setup_layer_sequence(self): lyrf = lambda: self.layer_factory( # pylint: disable=g-long-lambda shared_relative_position_bias=self.relpos_bias) lyrf = t5_architecture.maybe_remat( lyrf, self.layer_remat, self.scan_layers, static_argnums=(5, 6, 7, 8, 9, 10)) if not self.scan_layers: self.layers = [lyrf() for _ in range(self.num_layers)] return PerceiverARTransparentLayerSequence(self.layers, self.num_latents) else: # Create a non-scanned version of lyrf to use for the first layer. 
lyrf_notscanned = lambda: self.layer_factory( # pylint: disable=g-long-lambda # pytype: disable=wrong-keyword-args shared_relative_position_bias=self.relpos_bias, scanned=False) lyrf_notscanned = t5_architecture.maybe_remat( lyrf_notscanned, self.layer_remat, self.scan_layers, static_argnums=(5, 6, 7, 8, 9, 10)) self.layers = [ lyrf_notscanned(), self._construct_scanned_decoder( lyrf, self.num_layers - 1, num_broadcast_args=11) ] return PerceiverARTransparentLayerSequence(self.layers, self.num_latents) def decode_from_continuous_inputs(self, embedded_inputs, encoder_outputs, decoder_positions=None, decoder_mask=None, encoder_decoder_mask=None, logit_mask=None, *, enable_dropout: bool = True, decode: bool = False, max_decode_length: Optional[int] = None, prefill: bool = False, prefill_lengths: Optional[Array] = None, num_latents: Optional[int] = None, sequence_lengths: Optional[Array] = None): """Applies the decoder on the continuous (embedded) inputs.""" if decoder_positions is not None: raise NotImplementedError('Perceiver AR does not yet support packing.') # sequence_lengths is required, but has to be defined as optional to # maintain API compatibility. if sequence_lengths is None: raise ValueError('sequence_lengths must be supplied fo Perceiver AR.') if num_latents and num_latents > self.num_latents: raise ValueError( f'Overridden num_latents ({num_latents}) must be <= self.num_latents ' f'({self.num_latents}).') num_latents = num_latents or self.num_latents # If encoded is not given, this block is decoder only and does not contain # attention from decoder to encoder. if encoder_outputs is not None: assert encoder_outputs.ndim == 3 # (batch, len, depth) # Apply the decoder layers, attending to the encoder outputs (if provided), # and attending to previous decoder inputs (by masking future inputs). 
decoder_outputs = self.decoder( embedded_inputs, encoder_outputs, decoder_mask=decoder_mask, encoder_decoder_mask=encoder_decoder_mask, logit_mask=logit_mask, enable_dropout=enable_dropout, decode=decode, max_decode_length=max_decode_length, prefill=prefill, prefill_lengths=prefill_lengths, num_latents=num_latents, sequence_lengths=sequence_lengths) if self.scan_layers: decoder_outputs = decoder_outputs[0] # Output length should always be <= the number of latents regardless of # input length or configured number of latents. During training it will be # the same. During fast decoding, it may just be 1. assert decoder_outputs.shape[-2] <= num_latents # Post-process final decoder layer outputs. decoder_outputs = self.decoder_norm(decoder_outputs) decoder_outputs = self.output_dropout( decoder_outputs, deterministic=not enable_dropout) # Slice logit_mask to match output positions. if logit_mask is not None: if logit_mask.shape[-2] != decoder_outputs.shape[-2]: assert logit_mask.shape[-2] >= decoder_outputs.shape[-2] logit_mask = slicing.slice_sequences_vmap( logit_mask, sequence_lengths, decoder_outputs.shape[-2], axis_within_vmap=-2) decoder_outputs = logit_mask * decoder_outputs if self.sow_intermediates: self.sow('intermediates', 'pre_logits_layer', decoder_outputs) # Decoded Logits if self.logits_dense is not None: logits = self.logits_dense(decoder_outputs) else: # Use the transpose of embedding matrix for logit transform. logits = self.embedder.embedders['token_ids'].attend(decoder_outputs) # pytype: disable=attribute-error # Correctly normalize pre-softmax logits for this shared case. logits = logits / jnp.sqrt(decoder_outputs.shape[-1]) if self.sow_intermediates: self.sow('intermediates', 'logits', logits) return logits class DecoderOnly(t5_architecture.DecoderOnly): """Perceiver AR Decoder-only model.""" # num_latents is actually required, but has to be marked as optional because # we don't yet require Python 3.10, which provides keyword-only dataclasses. 
num_latents: Optional[int] = None def setup(self): if self.num_latents is None: raise ValueError('num_latents must be specified.') super().setup() def __call__(self, decoder_input_tokens, decoder_target_tokens, decoder_segment_ids=None, decoder_positions=None, decoder_causal_attention=None, *, enable_dropout: bool = True, decode: bool = False, max_decode_length: Optional[int] = None, prefill: bool = False, prefill_lengths: Optional[Array] = None, num_latents: Optional[int] = None, **kwargs): """Applies Perceiver AR Decoder-only model on the inputs. This method requires both decoder_target_tokens and decoder_input_tokens, which is typically a shifted version of the former. For a packed dataset, it Packing is not currently supported for Perceiver AR. Args: decoder_input_tokens: input token to the decoder. decoder_target_tokens: target token to the decoder. decoder_segment_ids: decoder segmentation info for packed examples. decoder_positions: decoder subsequence positions for packed examples. decoder_causal_attention: a binary mask indicating the "inputs" portion of the concatenated sequence for a prefix LM. enable_dropout: Enables dropout if set to True. decode: Whether to prepare and use an autoregressive cache. max_decode_length: An optional integer specifying the maximum decoding length. Note that this is only used for defining the relative position embedding parameters. prefill: Whether to run a partial sequence to prefill the cache. prefill_lengths: The length of each partial sequence we are filling in the cache, lengths are inferred from the mask if not provided. num_latents: Used to override the number of output Perceiver AR latents during decoding. **kwargs: Additional keyword arguments to pass on to the decoder. Returns: logits array from LanguageModel. """ if decode and prefill: raise ValueError('Only one of `decode` and `prefill` can be set. 
Use ' '`prefill` to pre-populate the cache for Prefix LMs ' 'before using `decode`') # Perceiver AR operation does not support packing. if decoder_positions is not None: raise NotImplementedError( 'decoder_positions is provided, but Perceiver AR does not yet ' 'support packing.') if decoder_segment_ids is not None: raise NotImplementedError( 'decoder_segment_ids is provided, but Perceiver AR does not yet ' 'support packing.') if num_latents and num_latents > self.num_latents: raise ValueError( f'Overridden num_latents ({num_latents}) must be <= self.num_latents ' f'({self.num_latents}).') num_latents = num_latents or self.num_latents # Calculate sequence lengths based on target tokens. sequence_lengths = slicing.get_sequence_lengths( decoder_target_tokens=decoder_target_tokens) if decode: decoder_mask = None else: decoder_mask = attention.make_decoder_mask( decoder_target_tokens=decoder_target_tokens, sequence_lengths=sequence_lengths, num_latents=num_latents, dtype=self.dtype, decoder_causal_attention=decoder_causal_attention) # We reuse Decoder class, which can optionally takes in encoded and # encoder_decoder_mask. These are used when Decoder is used in the context # of encoder-decoder model. For LM, we don't have an encoder. So set these # to None. 
return self.decoder( # pytype: disable=attribute-error encoder_outputs=None, decoder_input_tokens=decoder_input_tokens, decoder_positions=decoder_positions, decoder_mask=decoder_mask, encoder_decoder_mask=None, segment_ids=decoder_segment_ids, enable_dropout=enable_dropout, decode=decode, max_decode_length=max_decode_length, prefill=prefill, prefill_lengths=prefill_lengths, num_latents=num_latents, sequence_lengths=sequence_lengths, **kwargs) def create_residuals_and_queries( layer_input: Array, x: Array, logit_mask, *, num_latents: Optional[Array], sequence_lengths: Array) -> Tuple[Array, Array, Optional[Array], Array]: """Slice layer inputs to get versions to use as queries.""" if x.shape[-2] > num_latents: layer_input_residuals = slicing.slice_sequences_xmap( layer_input, sequence_lengths, num_latents, axis_within_xmap=0) x_queries = slicing.slice_sequences_xmap( x, sequence_lengths, num_latents, axis_within_xmap=0) query_offset = slicing.sequence_slice_start(sequence_lengths, num_latents) else: layer_input_residuals = layer_input x_queries = x query_offset = None if logit_mask.shape[-2] > num_latents: logit_mask_queries = slicing.slice_sequences_vmap( logit_mask, sequence_lengths, num_latents, axis_within_vmap=0) else: logit_mask_queries = logit_mask return layer_input_residuals, x_queries, query_offset, logit_mask_queries
{ "content_hash": "672fe73929c21abc9964ffa9cf6f97f3", "timestamp": "", "source": "github", "line_count": 407, "max_line_length": 122, "avg_line_length": 40.842751842751845, "alnum_prop": 0.6456716597485411, "repo_name": "google/flaxformer", "id": "2a3b83cb50af21d9f15647459fbb2217d14e7a51", "size": "17199", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "flaxformer/architectures/perceiver_ar/perceiver_ar_architecture.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1504920" } ], "symlink_target": "" }
import os import shutil import sys import subprocess import json import yaml import inflect from scaffold.custom_fields import * from scaffold.modules.replace_string import replace_string, \ new_route_string, menu_string, js_src_string, test_script_string, conf_js_string from scaffold.modules.errors import BlueprintError blueprint_file = 'app/__init__.py' test_script = 'tests.bash' yaml_file = sys.argv[1] app_js_file = "app/templates/static/js/app.js" main_index_file = "app/templates/index.html" conf_js_file = "conf.js" # Error classes def make_plural(resource): # https://pypi.python.org/pypi/inflect p = inflect.engine() if p.singular_noun(resource): resources = resource resource = p.singular_noun(resource) return resource, resources else: resources = p.plural(resource) return resource, resources def generate_files(module_path): app_files = ['views.py', 'models.py', '__init__.py', 'tests.py'] for file in app_files: # Generate App files if file == "views.py": with open(os.path.join(module_path, 'views.py'), "w") as new_file: with open("scaffold/app/views.py", "r") as old_file: for line in old_file: new_file.write(line.format(resource=resource, resources=resources, Resources=resources.title(), Resource=resource.title(), add_fields=add_fields)) elif file == "models.py": with open(os.path.join(module_path, 'models.py'), "w") as new_file: with open("scaffold/app/models.py", "r") as old_file: for line in old_file: new_file.write(line.format(resource=resource, resources=resources, Resources=resources.title(), db_rows=db_rows, schema=schema, meta=meta, init_self_vars=init_self_vars, init_args=init_args)) elif file == "__init__.py": with open(os.path.join(module_path, '__init__.py'), "w") as new_file: with open("scaffold/app/__init__.py", "r") as old_file: for line in old_file: new_file.write(line) # Tests elif file == "tests.py": with open(os.path.join(module_path, 'test_{}.py'.format(resources)), "w") as new_file: with open("scaffold/app/tests.py", "r") as old_file: for 
line in old_file: new_file.write(line.format(resource=resource, resources=resources, Resources=resources.title(), test_add_fields=json.dumps( test_add_fields), test_update_fields=json.dumps( test_update_fields))) def register_blueprints(): string_to_insert_after = '# Blueprints' new_blueprint = """ # Blueprints from app.{resources}.views import {resources} app.register_blueprint({resources}, url_prefix='/api/v1/{resources}')""".format(resources=resources) with open(blueprint_file, 'r+') as old_file: filedata = old_file.read() if string_to_insert_after in filedata: # replace the first occurrence new_filedata = filedata.replace( string_to_insert_after, new_blueprint, 1) with open(blueprint_file, 'w') as new_file: new_file.write(new_filedata) print("Registered Blueprints for ", resources) else: raise BlueprintError() def clean_up(module_path): if os.path.isdir(module_path): shutil.rmtree(module_path) def run_autopep8(): try: cmd_output = subprocess.check_output( ['autopep8', '--in-place', '--recursive', 'app']) print("Ran autopep8") except subprocess.CalledProcessError: print("autopep8 failed") raise # Main Code Start # # Parse YAML file with open(yaml_file, "r") as file: yaml_data = yaml.load(file) for module, fields in yaml_data.items(): # make module name plural resource, resources = make_plural(module) # Start strings to insert into models db_rows = "" schema = "" meta = "" init_self_vars = "" init_args = "" # End strings to insert into models # Start strings to insert into views add_fields = "" # strings to insert into _form.html form_args = [] form_fields = "" # strings to insert into update.html update_form_args = "" # strings to insert into index.html field_table_headers = "" index_fields = "" # strings to insert into tests.py test_add_fields = {} test_update_fields = {} # Fields to insert into controller.js controller_fields = "" radio_button_default ="" # Fields to add to protractor spec.js protractor_page_objects = "" protractor_edit_elments = "" 
protractor_add_elments = "" for f in fields: field, field_type = f.split(':') if field_type == "string": db_rows += """ {} = db.Column(db.String(250), nullable=False)""".format(field) schema += """ {} = fields.String(validate=not_blank)""".format(field) test_add_fields[field] = string_test test_update_fields[field] = update_string_test elif field_type == "boolean": db_rows += """ {} = db.Column(db.Boolean, nullable=False)""".format(field) schema += """ {} = fields.Boolean(required=True)""".format(field) test_add_fields[field] = boolean_test test_update_fields[field] = update_boolean_test elif field_type == "integer": db_rows += """ {} = db.Column(db.Integer, nullable=False)""".format(field) schema += """ {} = fields.Integer(required=True)""".format(field) test_add_fields[field] = integer_test test_update_fields[field] = update_integer_test elif field_type == "biginteger": db_rows += """ {} = db.Column(db.BigInteger, nullable=False)""".format(field) schema += """ {} = fields.Integer(required=True)""".format(field) test_add_fields[field] = big_integer_test test_update_fields[field] = update_big_integer_test elif field_type == "email": db_rows += """ {} = db.Column(db.String(250), nullable=False)""".format(field) schema += """ {} = fields.Email(validate=not_blank)""".format(field) test_add_fields[field] = email_test test_update_fields[field] = update_email_test elif field_type == "url": db_rows += """ {} = db.Column(db.String(250), nullable=False)""".format(field) schema += """ {} = fields.URL(validate=not_blank)""".format(field) test_add_fields[field] = url_test test_update_fields[field] = update_url_test elif field_type == "datetime": db_rows += """ {} = db.Column(db.TIMESTAMP,server_default=db.func.current_timestamp(),nullable=False)""".format(field) schema += """ {} = fields.DateTime(required=True)""".format(field) test_add_fields[field] = date_time_test test_update_fields[field] = update_date_time_test elif field_type == "date": db_rows += """ {} = 
db.Column(db.Date, nullable=False)""".format(field) schema += """ {} = fields.Date(required=True)""".format(field) test_add_fields[field] = date_test test_update_fields[field] = update_date_test elif field_type == "decimal": db_rows += """ {} = db.Column(db.Numeric, nullable=False)""".format(field) schema += """ {} = fields.Decimal(as_string=True)""".format(field) test_add_fields[field] = decimal_test test_update_fields[field] = update_decimal_test elif field_type == "text": db_rows += """ {} = db.Column(db.Text, nullable=False)""".format(field) schema += """ {} = fields.String(validate=not_blank)""".format(field) test_add_fields[field] = text_test test_update_fields[field] = update_text_test # models meta += """ '{}', """.format(field) init_args += """ {}, """.format(field) init_self_vars += """ self.{field} = {field}""".format(field=field) # Views add_fields += add_string.format(field) #_form.html form_args.append( """{resource}_{field} = ''""".format(resource=resource, field=field)) field_table_headers += """ <th>{field}</th> """.format(field=field) index_fields += """<td>{{{{ result['{field}'] }}}}</td>""".format( field=field) update_form_args += """{resource}_{field} = {resource}.{field}, """.format(resource=resource, field=field) # controller.js controller_fields += controller_field.format(field=field) # Generate files with the new fields module_dir = os.path.join('app', resources) try: os.mkdir(module_dir) try: generate_files(module_dir) print('{} created successfully'.format(module_dir)) register_blueprints() # Add tests to test.bash replace_string( resource, resources, test_script, "#TESTS", test_script_string) run_autopep8() except: clean_up(module_dir) raise except: raise
{ "content_hash": "7ac7b1a6c250e2e4a5e45b1c9d961e13", "timestamp": "", "source": "github", "line_count": 285, "max_line_length": 118, "avg_line_length": 37.03859649122807, "alnum_prop": 0.5149677908298598, "repo_name": "jking6884/RESTapi", "id": "f0623a851391e397851b3ae9816787634b9da53f", "size": "10578", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scaffold.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "15274" }, { "name": "HTML", "bytes": "14505" }, { "name": "JavaScript", "bytes": "49098" }, { "name": "Nginx", "bytes": "1545" }, { "name": "Python", "bytes": "76769" }, { "name": "Shell", "bytes": "502" } ], "symlink_target": "" }
from __future__ import print_function import sys import re import optparse import os import token import itertools import lib2to3.pgen2.tokenize import lib2to3.pygram import lib2to3.pytree import hashlib import difflib import traceback from cStringIO import StringIO def _diff_strings(a, b, filename): """Return a unified diff of two strings.""" a = a.splitlines() b = b.splitlines() return '\n'.join(difflib.unified_diff( a, b, filename, filename, "(original)", "(refactored)", lineterm="", )) def module_name_for_path(path): head, module = os.path.split(os.path.splitext(path)[0]) while os.path.exists(os.path.join(head, '__init__.py')): head, tail = os.path.split(head) module = tail + '.' + module # key_base pseudopackages; only the few that the external tools are in, # and only for the the old Western Post environment. ks_tools = os.environ.get('KS_TOOLS') if ks_tools and ks_tools in head and '/key_base/' in head: if '/maya/python/' in head: return 'ks.maya.' + module if '/key_base/python/' in head: return 'ks.' 
+ module return module def resolve_relative(relative, module): if not relative.startswith('.'): return 0, relative orig_relative = relative parts = module.split('.') while relative.startswith('.'): relative = relative[1:] parts.pop(-1) parts.append(relative) return len(orig_relative) - len(relative), '.'.join(x for x in parts if x) def _iter_chunked_source(source): driver = lib2to3.pgen2.driver.Driver(lib2to3.pygram.python_grammar, lib2to3.pytree.convert) if hasattr(lib2to3.pgen2.tokenize, 'detect_encoding'): string_io = StringIO(source) encoding, _ = lib2to3.pgen2.tokenize.detect_encoding(string_io.readline) else: encoding = 'utf8' tree = driver.parse_string(source) for is_source, group in itertools.groupby(_iter_chunked_node(tree), lambda (is_source, _): is_source): yield is_source, ''.join((value.encode(encoding) if isinstance(value, unicode) else value) for _, value in group) def _iter_chunked_node(node): if isinstance(node, lib2to3.pytree.Node): for child in node.children: for chunk in _iter_chunked_node(child): yield chunk else: # Deal with comments and spaces; comments -> False prefix = node.prefix or '' yield (not prefix.strip().startswith('#')), prefix # Everything that isn't a STRING can have identifiers. yield (node.type not in (token.STRING, )), node.value def rewrite(source, mapping, module_name=None, non_source=False, **kw): rewriter = Rewriter(mapping, module_name, **kw) if non_source: return rewriter(source) rewritten = [] # Break the source into chunks that we may find identifiers in, and those # that we won't. for is_source, source in _iter_chunked_source(source): # Don't bother looking in comments and strings. if is_source: rewritten.append(rewriter(source)) else: rewritten.append(source) return ''.join(rewritten) class Rewriter(object): _direct_import_re = re.compile(r''' import\s+ ( (?: (?:,\s*)? # Splitting consecutive imports. [\w\.]+ # The thing being imported. (?:\s+as\s+\w+\s*?)? # It's new name. 
| \* )+ ) ''', re.X) _import_from_re = re.compile(r''' from\s+ ([\w\.]+)\s+ import\s+ ( (?: (?:,\s*)? # Splitting consecutive imports. \w+ # The thing being imported. (?:\s+as\s+\w+\s*?)? # It's new name. | \* )+ ) ''', re.X) _usage_re = re.compile(r''' [a-zA-Z_][a-zA-Z_0-9]* (:?.[a-zA-Z_][a-zA-Z_0-9]*)* ''', re.X) def __init__(self, mapping, module_name, absolute=None): self.mapping = mapping self.module_name = module_name self.absolute = absolute self.substitutions = {} def __call__(self, source): source = self._import_from_re.sub(self.import_from, source) source = self._direct_import_re.sub(self.direct_import, source) source = self._usage_re.sub(self.usage, source) for from_, to in self.substitutions.iteritems(): source = source.replace(from_, to) return source def add_substitution(self, source): tag = '__%s__' % hashlib.md5(source).hexdigest() self.substitutions[tag] = source return tag def split_as_block(self, block): for chunk in block.split(','): name, as_ = (re.split(r'\s+as\s+', chunk) + [None])[:2] name = name.strip() as_ = as_ and as_.strip() yield name, as_ def import_from(self, m): # print 'import_from:', m.groups() was_relative, base = resolve_relative(m.group(1), self.module_name) imports = [] # Convert the full names of every item. for name, ident in self.split_as_block(m.group(2)): full_name = base + '.' + name imports.append(( self.convert_module(full_name) or full_name, ident, )) # Assert that every item shares the same prefix. new_base = imports[0][0].split('.')[:-1] if any(x[0].split('.')[:-1] != new_base for x in imports[1:]): raise ValueError('conflicting rewrites in single import') # Restore the relative levels. if was_relative if self.absolute is None else not self.absolute: new_base = self.make_relative(new_base) else: new_base = '.'.join(new_base) # Rebuild the "as" block. 
imports = [(name.split('.')[-1], ident) for name, ident in imports] imports = [('%s as %s' % (name, ident) if ident else name) for name, ident in imports] # Format the final source. return self.add_substitution('from %s import %s' % ( new_base, ', '.join(imports) )) def make_relative(self, target): base = (self.convert_module(self.module_name) or self.module_name).split('.') while target and base and target[0] == base[0]: target = target[1:] base = base[1:] return '.' * len(base) + '.'.join(target) def direct_import(self, m): # print 'direct_import:', m.groups() imports = [] # Convert the full names of every item. for name, ident in self.split_as_block(m.group(1)): imports.append(( self.convert_module(name) or name, ident, )) # Rebuild the "as" block. imports = [('%s as %s' % (name, ident) if ident else name) for name, ident in imports] # Format the final source. return self.add_substitution('import ' + ', '.join(imports)) def usage(self, m): # print 'usage:', m.group(0) name = m.group(0) name = self.convert_identifier(name) or name return name def convert_module(self, name): parts = name.split('.') for old, new in self.mapping.iteritems(): old_parts = old.split('.') if parts[:len(old_parts)] == old_parts: return '.'.join([new] + parts[len(old_parts):]) def convert_identifier(self, name): parts = name.split('.') for old, new in self.mapping.iteritems(): old_parts = old.split('.') if parts[:len(old_parts)] == old_parts: return '.'.join([new] + parts[len(old_parts):]) def main(): opt_parser = optparse.OptionParser(usage="%prog [options] from:to... 
path...") opt_parser.add_option('-w', '--write', action='store_true') opt_parser.add_option('-a', '--absolute', action='store_true') opts, args = opt_parser.parse_args() renames = [] for i, arg in enumerate(args): if ':' not in arg: break old, new = arg.split(':', 1) renames.append((old, new)) args = args[i:] if not renames or not args: opt_parser.print_usage() exit(1) visited_paths = set() changed = set() def process(dir_name, path): if path.startswith('._') or not path.endswith('.py'): return if dir_name is not None: path = os.path.join(dir_name, path) if path in visited_paths: return visited_paths.add(path) print('#', path, file=sys.stderr) module_name = module_name_for_path(path) original = open(path).read().rstrip() + '\n' refactored = rewrite(original, dict(renames), module_name, absolute=opts.absolute) if re.sub(r'\s+', '', refactored) != re.sub(r'\s+', '', original): print(_diff_strings(original, refactored, path)) if opts.write: with open(path, 'wb') as fh: fh.write(refactored) for arg in args: try: process(None, arg) except Exception: print('# ERROR during', arg, file=sys.stderr) traceback.print_exc() for dir_name, dir_names, file_names in os.walk(arg): dir_names[:] = [x for x in dir_names if not x.startswith('.')] for file_name in file_names: try: process(dir_name, file_name) except Exception: print('# ERROR during', os.path.join(dir_name, file_name), file=sys.stderr) traceback.print_exc() print('Modified (%d)' % len(changed), file=sys.stderr) print('\n'.join(sorted(changed)), file=sys.stderr) if __name__ == '__main__': main()
{ "content_hash": "419ad09b5a12834f9d274dccf9338853", "timestamp": "", "source": "github", "line_count": 333, "max_line_length": 121, "avg_line_length": 29.92792792792793, "alnum_prop": 0.5542845675296006, "repo_name": "westernx/metatools", "id": "d0279d076c0fe3f0f989e84a92b8e0bad150a1bd", "size": "9966", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "metatools/imports/rewrite.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "677" }, { "name": "Python", "bytes": "114302" }, { "name": "Shell", "bytes": "1239" } ], "symlink_target": "" }
from collections.abc import Iterator import os import pathlib class Reader(Iterator): def __init__(self, file=None): if file is not None: self.set_file(file) else: self.file = None self.reset() def set_file(self, file): if isinstance(file, pathlib.PurePath): file = str(file) file = os.path.expanduser(file) if not os.path.exists(file): raise FileNotFoundError("file was not found: '{}'" .format(file)) self.file = file self.reset() def __iter__(self): self._iterator = self._get_iterator() return self def __next__(self): try: return self._iterator.__next__() except Exception as e: self.reset() raise e def read(self, file=None): if file is not None: self.set_file(file) items = [item for item in self] return items def read_next(self): if self._iterator is None: self._iterator = self._get_iterator() return self.__next__() def reset(self): self._iterator = None def _get_iterator(self): raise NotImplementedError def __getstate__(self): state = self.__dict__.copy() state['_iterator'] = None return state def __setstate__(self, state): self.__dict__.update(state) class LineReader(Reader): def _get_iterator(self): with open(self.file, mode='r', encoding='utf-8') as f: for line in f: yield line.strip() class CsvReader(Reader): def __init__(self, file=None, delimiter=','): super().__init__(file) self.delimiter = delimiter def _get_iterator(self): with open(self.file, mode='r', encoding='utf-8') as f: for line in f: yield line.strip().split(self.delimiter) class ZipReader(Reader): def __init__(self, readers): self._readers = readers def set_file(self, file): if not isinstance(file, (tuple, list)): file = [file] return self.set_files(file) def set_files(self, files): if len(files) != len(self._readers): raise ValueError('files must be given as many as readers') self.reset() for reader, file in zip(self._readers, files): if file is not None: reader.set_file(file) else: reader.file = None def reset(self): for reader in self._readers: reader.reset() self._iterator = None def 
_get_iterator(self): def _yield_null(): while True: yield None return zip(*[reader if reader.file is not None else _yield_null() for reader in self._readers]) def _create_root(format='conll', extra_fields=None): if format == 'conll': root = { 'id': 0, 'form': "<ROOT>", 'lemma': "<ROOT>", 'cpostag': "ROOT", 'postag': "ROOT", 'feats': "_", 'head': 0, 'deprel': "root", 'phead': "_", 'pdeprel': "_", } elif format == 'conll09': root = { 'id': 0, 'form': "<ROOT>", 'lemma': "<ROOT>", 'plemma': "_", 'pos': "ROOT", 'ppos': "_", 'feat': "_", 'pfeat': "_", 'head': 0, 'phead': "_", 'deprel': "root", 'pdeprel': "_", 'fillpred': "_", 'pred': "_", 'apreds': [], } else: raise ValueError("Format `` is not supported.".format(format)) if extra_fields: _append_fields(root, extra_fields) return root def _parse_conll(text, extra_fields=None): tokens = [_create_root('conll', extra_fields)] for line in [text] if isinstance(text, str) else text: line = line.strip() if not line: if len(tokens) > 1: yield tokens tokens = [_create_root('conll', extra_fields)] elif line.startswith('#'): continue else: cols = line.split("\t") token = { 'id': int(cols[0]), 'form': cols[1], 'lemma': cols[2], 'cpostag': cols[3], 'postag': cols[4], 'feats': cols[5], 'head': int(cols[6]), 'deprel': cols[7], 'phead': cols[8], 'pdeprel': cols[9], } if extra_fields: _append_fields(token, extra_fields, cols) tokens.append(token) if len(tokens) > 1: yield tokens def _append_fields(token, fields, cols=None): for name, field in fields.items(): if isinstance(field, dict): if cols is None: if token['id'] == 0 and 'root' in field: value = field['root'] elif 'default' in field: value = field['default'] else: raise IndexError('cannot extract field') else: index = field['index'] assert isinstance(index, int) if len(cols) <= index and 'default' in field: value = field['default'] else: # This raises IndexError if len(cols) <= field value = cols[index] else: assert isinstance(field, int) if cols is None and token['id'] == 0: value 
= None else: # This raises raise IndexError if len(cols) <= field value = cols[field] token[name] = value def _parse_conll09(text, extra_fields=None): if extra_fields is not None: raise NotImplementedError tokens = [_create_root(format='conll09')] for line in [text] if isinstance(text, str) else text: line = line.strip() if not line: if len(tokens) > 1: yield tokens tokens = [_create_root(format='conll09')] elif line.startswith('#'): continue else: cols = line.split("\t") token = { 'id': int(cols[0]), 'form': cols[1], 'lemma': cols[2], 'plemma': cols[3], 'pos': cols[4], 'ppos': cols[5], 'feat': cols[6], 'pfeat': cols[7], 'head': int(cols[8]), 'phead': cols[9], 'deprel': cols[10], 'pdeprel': cols[11], 'fillpred': cols[12], 'pred': cols[13], 'apreds': cols[14:], } tokens.append(token) if len(tokens) > 1: yield tokens class ConllReader(Reader): def __init__(self, file=None, format='conll', extra_fields=None): super().__init__(file) self.format = format self.extra_fields = extra_fields def _get_iterator(self): if self.format == 'conll': parse_func = _parse_conll elif self.format == 'conll09': parse_func = _parse_conll09 else: raise ValueError("Format `` is not supported.".format(format)) with open(self.file, mode='r', encoding='utf-8') as f: yield from parse_func(f, self.extra_fields) def read_conll(file, format='conll', extra_fields=None): if isinstance(file, pathlib.PurePath): file = str(file) with open(file, mode='r', encoding='utf-8') as f: return parse_conll(f, format, extra_fields) def parse_conll(text, format='conll', extra_fields=None): if format == 'conll': return list(_parse_conll(text, extra_fields)) elif format == 'conll09': return list(_parse_conll09(text, extra_fields)) else: raise ValueError("Format `` is not supported.".format(format)) def _parse_tree(text, left_bracket='(', right_bracket=')'): stack = [] _buffer = [] for line in [text] if isinstance(text, str) else text: line = line.lstrip() if not line: continue for char in line: if char == 
left_bracket: stack.append([]) elif char == ' ' or char == '\n': if _buffer: stack[-1].append(''.join(_buffer)) _buffer = [] elif char == right_bracket: if _buffer: stack[-1].append(''.join(_buffer)) _buffer = [] if len(stack) > 1: stack[-2].append(stack.pop()) else: yield stack.pop() else: _buffer.append(char) class TreeReader(Reader): def __init__(self, file=None, left_bracket='(', right_bracket=')'): super(TreeReader, self).__init__(file) self.right_bracket = right_bracket self.left_bracket = left_bracket def _get_iterator(self): with open(self.file, mode='r', encoding='utf-8') as f: yield from _parse_tree(f, self.left_bracket, self.right_bracket) def read_tree(file, left_bracket='(', right_bracket=')'): if isinstance(file, pathlib.PurePath): file = str(file) with open(file, mode='r', encoding='utf-8') as f: return list(_parse_tree(f, left_bracket, right_bracket)) def parse_tree(text, left_bracket='(', right_bracket=')'): return list(_parse_tree(text, left_bracket, right_bracket)) class ContextualizedEmbeddingsReader(Reader): def _get_iterator(self): with ContextualizedEmbeddingsFile.open(self.file) as f: for value in f: yield value class ContextualizedEmbeddingsFile(object): def __init__(self, file): handle = None try: import h5py handle = h5py.File(file, 'r') except Exception as e: raise e self._file = file self._handle = handle self._sentence_id = -1 @classmethod def open(cls, file): return cls(file) def close(self): if not self.closed: self._handle.close() self._handle = None @property def closed(self): return self._handle is None def _check_closed(self): if self.closed: raise ValueError("I/O operation on closed file.") def __del__(self): try: self.close() except Exception: pass def __enter__(self): self._check_closed() return self def __exit__(self, exc_type, exc_value, traceback): self.close() def __iter__(self): self._check_closed() return self def __next__(self): self._check_closed() self._sentence_id += 1 key = str(self._sentence_id) if key not in 
self._handle: raise StopIteration value = self._handle[key][...] return value
{ "content_hash": "5da3fb9e18983d1a83fb7580c7216ad8", "timestamp": "", "source": "github", "line_count": 394, "max_line_length": 76, "avg_line_length": 28.598984771573605, "alnum_prop": 0.49148029818956335, "repo_name": "chantera/teras", "id": "338fa5de45fb4c77ac712417104a79b41036dbae", "size": "11268", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "teras/io/reader.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "121393" } ], "symlink_target": "" }
import logging
import const
import os

# The log file lives inside the application's config directory.
FILE_NAME = os.path.join(const.Constant.conf_dir, 'musicbox.log')

if not os.path.isdir(const.Constant.conf_dir):
    os.mkdir(const.Constant.conf_dir)

# Write a separator line so each program start is easy to spot in the log.
with open(FILE_NAME, 'a+') as f:
    f.write('#' * 80)
    f.write('\n')


def getLogger(name):
    """Return a DEBUG-level logger named ``name`` that appends to FILE_NAME.

    Safe to call repeatedly with the same name: the file handler is only
    attached once, so log records are not duplicated.
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)

    # Bug fix: the original attached a new FileHandler on every call, so a
    # second getLogger(name) made every record appear twice in the file.
    if not log.handlers:
        fh = logging.FileHandler(FILE_NAME)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)s - %(name)s:%(lineno)s: %(message)s'))  # NOQA
        log.addHandler(fh)

    return log
{ "content_hash": "9416ddbf598b03c625dbef8ed894ec47", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 113, "avg_line_length": 24.4, "alnum_prop": 0.6688524590163935, "repo_name": "Catofes/musicbox", "id": "ed4db362a71ce58c329e0c21660bd6af91d9cdbe", "size": "774", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "NEMbox/logger.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "109285" } ], "symlink_target": "" }
"""SEE VirtualBox Resources. This module provides an API for creating a virDomain controlling a VirtualBox Virtual Machine. Configuration:: { "domain": { "configuration": "/etc/myconfig/see/domain.xml", }, "disk": { "image": { "uri": "/var/mystoragepool/image.vdi", "provider": "see.image_providers.DummyProvider" } } } Domain: The User must specify the path of the domain XML configuration file for the Linux Container. The following fields in the configuration file are replaced or added if missing:: * name * uuid * devices Disk: The Disk section must contain the image field with the absolute path to the disk image file. """ import libvirt import xml.etree.ElementTree as etree from see.context.resources import resources from see.context.resources.helpers import subelement, tag_disk def domain_xml(identifier, xml, disk_path): """Fills the XML file with the required fields. * name * uuid * devices """ domain = etree.fromstring(xml) subelement(domain, './/name', 'name', identifier) subelement(domain, './/uuid', 'uuid', identifier) devices = subelement(domain, './/devices', 'devices', None) disk = subelement(devices, './/disk', 'disk', None, type='file', device='disk') subelement(disk, './/source', 'source', None, file=disk_path) return etree.tostring(domain).decode('utf-8') def domain_create(hypervisor, identifier, configuration, disk_path): """libvirt Domain definition. @raise: ConfigError, IOError, libvirt.libvirtError. """ with open(configuration['configuration']) as config_file: domain_config = config_file.read() xml = domain_xml(identifier, domain_config, disk_path) return hypervisor.defineXML(xml) def domain_delete(domain, logger): """libvirt domain undefinition. @raise: libvirt.libvirtError. 
""" if domain is not None: try: if domain.isActive(): domain.destroy() except libvirt.libvirtError: logger.exception("Unable to destroy the domain.") try: domain.undefine() except libvirt.libvirtError: try: domain.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA) # domain with snapshots except libvirt.libvirtError: logger.exception("Unable to undefine the domain.") class VBoxResources(resources.Resources): """Libvirt resources wrapper for Virtualbox. It wrappes libvirt hypervisor connection and domain, exposing a clean way to initialize and clean them up. """ def __init__(self, identifier, configuration): super(VBoxResources, self).__init__(identifier, configuration) self._domain = None self._hypervisor = None @property def hypervisor(self): return self._hypervisor @property def domain(self): return self._domain def allocate(self): """Initializes libvirt resources.""" disk_path = self.provider_image tag_disk(self.provider_image) self._hypervisor = libvirt.open( self.configuration.get('hypervisor', 'vbox:///session')) self._domain = domain_create(self._hypervisor, self.identifier, self.configuration['domain'], disk_path) def deallocate(self): """Releases all resources.""" if self._domain is not None: domain_delete(self._domain, self.logger) if self._hypervisor is not None: try: self._hypervisor.close() except Exception: self.logger.exception("Unable to close hypervisor connection.")
{ "content_hash": "1a47e9bdb6024a4b35dac934265113ab", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 109, "avg_line_length": 27.397058823529413, "alnum_prop": 0.6427804616210413, "repo_name": "F-Secure/see", "id": "0f985a8886005c92eebafd9d595dc0e1f48b16a7", "size": "4303", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "see/context/resources/vbox.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "224049" } ], "symlink_target": "" }
""" Runs the auto routine. Called once. """ from networktables import NetworkTable import time import wpilib class Autonomous(object): def __init__(self, drive, randy, frontGear, backGear, vision): self.drive = drive self.randy = randy self.frontGear = frontGear self.backGear = backGear self.vision = vision self.sd = NetworkTable.getTable('SmartDashboard') def run(self): self.randy.run(True) # deploy Randy if (self.sd.getBoolean('autoAngle') == True): # Drive forward startTime = time.clock() while (time.clock() - startTime < 2.3): self.drive.driveToAngle(-0.65, 0, True) # Stop self.drive.cartesian(0, 0, 0) # Turn 60 or -60 degrees if (self.sd.getBoolean('isLeft') == True): self.drive.driveToAngle(0, -60, False) else: self.drive.driveToAngle(0, 60, False) # Do vision if (self.vision.alignToPeg(direction=1) == False): # if returns an error, stop auto return False # Stop self.drive.cartesian(0, 0, 0) ''' Center peg ''' startTime = time.clock() while (time.clock() - startTime < 5.2): self.drive.cartesian(0, -0.3, 0.025) self.drive.cartesian(0, 0, 0) # Activate front gear self.frontGear.run(True) # Stop Randy self.randy.run(False)
{ "content_hash": "1d9933d2c26e84817a1ae39fa4dbdae6", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 95, "avg_line_length": 28.61111111111111, "alnum_prop": 0.5346278317152103, "repo_name": "3299/2017", "id": "629ee4aba0f47ff216e10dc3da8aa2847cc12319", "size": "1545", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "autonomous.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24567" } ], "symlink_target": "" }
def build_profile(first, last, **user_info):
    """Build a dictionary describing a user.

    ``first`` and ``last`` become the ``first_name``/``last_name`` keys;
    any extra keyword arguments are merged in as-is.
    """
    # Idiom fix: build the dict in one expression instead of copying
    # user_info key by key in a loop.
    return {'first_name': first, 'last_name': last, **user_info}


user_profile = build_profile('Juan', 'Calzoncit',
                             location='Saltillo',
                             field='Systems Engineering',
                             semester=7)
print(user_profile)
{ "content_hash": "14e900709939047b2837811f9a77c4e5", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 117, "avg_line_length": 24.266666666666666, "alnum_prop": 0.6401098901098901, "repo_name": "AnhellO/DAS_Sistemas", "id": "5129f39d1feeb15de4d06fefe58f420e60d92773", "size": "364", "binary": false, "copies": "1", "ref": "refs/heads/ene-jun-2022", "path": "Ene-Jun-2022/juan-alejandro-calzoncit-rodriguez/práctica-2/capitulo-8/8-13.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "8515" }, { "name": "Go", "bytes": "25845" }, { "name": "HTML", "bytes": "36671" }, { "name": "Python", "bytes": "716604" } ], "symlink_target": "" }
import unittest
from unittest import mock

import grid
import util


class ProblemGenerateTests(unittest.TestCase):
    """Unit tests for util.generate_problem(width, height, obstacle_ratio)."""

    @mock.patch('util.grid.Grid')
    def test_generates_a_grid(self, GridMock):
        # generate_problem must construct a Grid with exactly the arguments
        # it was given; open_cells is stubbed so start/goal picking works.
        GridMock.return_value.open_cells = [(0,0), (0, 1), (1, 0)]
        util.generate_problem(10, 10, 0.1)
        GridMock.assert_called_once_with(10, 10, 0.1)

    def test_generates_a_grid_with_correct_width(self):
        g, _, _ = util.generate_problem(32, 16, 0)
        self.assertEqual(g.width, 32)

    def test_generates_a_grid_with_correct_height(self):
        g, _, _ = util.generate_problem(32, 16, 0)
        self.assertEqual(g.height, 16)

    def test_generates_a_grid_with_obstacles(self):
        g, _, _ = util.generate_problem(10, 10, 0.2)
        # Count cells marked as obstacles in the 10x10 grid.
        obs = sum([1 for x, y in g.grid if g.grid[x, y] == grid.OBSTACLE])
        # Obstacle placement appears to be stochastic, so allow +/-0.1
        # around the requested 0.2 ratio.
        self.assertAlmostEqual(obs/100, 0.2, delta=0.1)

    def test_start_and_goal_positions_are_not_the_same(self):
        # Placement is random; repeat to make a collision likely if the
        # implementation ever allowed start == goal.
        for i in range(100):
            with self.subTest(i=i):
                g, start, end = util.generate_problem(5, 5, 0.2)
                self.assertNotEqual(start, end)
{ "content_hash": "fe711e3bcc84cbd2f56b11b0b611d5fe", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 74, "avg_line_length": 36.516129032258064, "alnum_prop": 0.6166077738515902, "repo_name": "XeryusTC/search", "id": "d375708f85f097a34815fd9c21d9f13d3ffc4370", "size": "1156", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_util.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "29681" } ], "symlink_target": "" }
from django.conf import settings
from django.db import migrations

from corehq.sql_db.operations import RawSQLMigration

# Renders raw SQL files from the sql_templates directory with the
# PL/Proxy cluster name substituted in.
migrator = RawSQLMigration(('corehq', 'sql_proxy_accessors', 'sql_templates'), {
    'PL_PROXY_CLUSTER_NAME': settings.PL_PROXY_CLUSTER_NAME
})


class Migration(migrations.Migration):
    """Drop the obsolete save_ledger_values() proxy function and
    (re)install get_ledger_transactions_for_case from its SQL template."""

    dependencies = [
        ('sql_proxy_accessors', '0010_merge'),
    ]

    operations = [
        migrations.RunSQL(
            # Forward: drop the function. Reverse: no-op ("SELECT 1").
            "DROP FUNCTION IF EXISTS save_ledger_values(TEXT[], form_processor_ledgervalue[]);",
            "SELECT 1"
        ),
        migrator.get_migration('get_ledger_transactions_for_case.sql'),
    ]
{ "content_hash": "7cd4fb84142a90ee0f9e8a49b8026efb", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 96, "avg_line_length": 27.695652173913043, "alnum_prop": 0.6640502354788069, "repo_name": "dimagi/commcare-hq", "id": "14c907fc8fb8cc1fa7db644624546881fa354085", "size": "637", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "corehq/sql_proxy_accessors/migrations/0011_ledger_transactions.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "82928" }, { "name": "Dockerfile", "bytes": "2341" }, { "name": "HTML", "bytes": "2589268" }, { "name": "JavaScript", "bytes": "5889543" }, { "name": "Jinja", "bytes": "3693" }, { "name": "Less", "bytes": "176180" }, { "name": "Makefile", "bytes": "1622" }, { "name": "PHP", "bytes": "2232" }, { "name": "PLpgSQL", "bytes": "66704" }, { "name": "Python", "bytes": "21779773" }, { "name": "Roff", "bytes": "150" }, { "name": "Shell", "bytes": "67473" } ], "symlink_target": "" }
from Global.Global_consts import Global_consts
import socket


class Network_utils():
    """Helpers for creating UDP sockets and the default server address."""

    @staticmethod
    def get_udp_socket(host, port):
        """Return a UDP socket bound to (host, port)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind((host, port))
        return sock

    @staticmethod
    def get_test_udp_socket():
        """Return a UDP test socket on the default IP, port 1234, with a
        3-second timeout."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.settimeout(3.0)
        sock.bind((Global_consts.default_ip, 1234))
        return sock

    @staticmethod
    def get_default_server_addr():
        """Return the default (ip, port) tuple for the server."""
        return (Global_consts.default_ip, Global_consts.default_port)
{ "content_hash": "f7b5de30f65fb5783241112429da989c", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 69, "avg_line_length": 26.565217391304348, "alnum_prop": 0.6317512274959084, "repo_name": "mikesligo/distributed-search", "id": "c7f4d33fa1ac13c2e1e9527f4d09eb40e2b40404", "size": "611", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/networking/Network_utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "21100" } ], "symlink_target": "" }
from __future__ import unicode_literals

import frappe


def execute():
	"""Promote the "Data Import Beta" doctype to be the canonical
	"Data Import", archiving the old one as "Data Import Legacy"."""
	# Nothing to do on sites that never had the beta doctype.
	if not frappe.db.exists("DocType", "Data Import Beta"):
		return

	# Clear any leftover legacy table, then move the current doctype aside.
	frappe.db.sql("DROP TABLE IF EXISTS `tabData Import Legacy`")
	frappe.rename_doc('DocType', 'Data Import', 'Data Import Legacy')
	# Commit so the rename is persisted before the second DDL/rename pair
	# (DDL here runs outside Frappe's normal transaction handling).
	frappe.db.commit()
	frappe.db.sql("DROP TABLE IF EXISTS `tabData Import`")
	frappe.rename_doc('DocType', 'Data Import Beta', 'Data Import')
{ "content_hash": "b6edd495d823116a89d9622a59bfc278", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 66, "avg_line_length": 31.46153846153846, "alnum_prop": 0.7212713936430318, "repo_name": "vjFaLk/frappe", "id": "f3eed6253c4c76dec5eb2ed8d9862b77b89e263a", "size": "510", "binary": false, "copies": "1", "ref": "refs/heads/parsimony-production", "path": "frappe/patches/v12_0/replace_old_data_import.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "290337" }, { "name": "HTML", "bytes": "179507" }, { "name": "JavaScript", "bytes": "2179734" }, { "name": "Less", "bytes": "146135" }, { "name": "Makefile", "bytes": "99" }, { "name": "Python", "bytes": "2774237" }, { "name": "SCSS", "bytes": "15721" }, { "name": "Shell", "bytes": "3875" }, { "name": "Vue", "bytes": "95109" } ], "symlink_target": "" }
import base64

import click
import urllib

# very simplistic file extension to mimetype mapping
mime_types = {
    "css": "text/css",
    "gif": "image/gif",
    "ico": "image/x-icon",
    "jpeg": "image/jpeg",
    "jpg": "image/jpeg",
    "js": "application/javascript",
    "png": "image/png",
}


@click.command()
@click.argument('path')
def main(path):
    """Print the file at PATH as a data: URI.

    The MIME type is guessed from the file extension; unknown extensions
    fall back to application/octet-stream.
    """
    if "." not in path:
        raise click.BadParameter("Input file name must have file name extension, e. g. .jpg")
    extension = path.split(".")[-1].lower()
    mime_type = mime_types.get(extension, "application/octet-stream")
    # Bug fix: open in binary mode — images and other assets are binary and
    # text mode corrupts them on platforms with newline translation.
    with click.open_file(path, 'rb') as inputfile:
        content = inputfile.read()
    # Bug fix: the old codecs-based str.encode("base64") inserted a newline
    # every 76 chars (plus a trailing one), which urllib.quote then
    # percent-encoded into the URI, producing an invalid data URI.
    # base64.b64encode emits a single unbroken string.
    content = base64.b64encode(content)
    content = urllib.quote(content)
    content = "data:%s;base64,%s" % (mime_type, content)
    click.echo(content)


if __name__ == "__main__":
    main()
{ "content_hash": "0620428ec6e562e6338056b61f41cfd9", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 93, "avg_line_length": 26.424242424242426, "alnum_prop": 0.6055045871559633, "repo_name": "marians/datauri", "id": "d78c85f6ab9ed4176609c00109d156656a29e206", "size": "936", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "datauri.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1368" } ], "symlink_target": "" }
from py_db import db
import argparse
from decimal import Decimal

db = db('NSBL')

# Needs to be run every week after the registers are created
# Only works on years >= 2017
# Takes players who play for multiple teams in a season and splits their statistics into multiple entries (1 for each team they played on, as well as one for their full season)
# register_primary has more players than other registers because it splits multi-team entries


def process(year):
    """Entry point: split multi-team hitter and pitcher rows for `year`.

    No-op for seasons before 2017 (older registers lack the needed shape).
    """
    if year < 2017:
        pass
    else:
        multi_hitters(year)
        multi_pitchers(year)


def get_current_team(player_name, position, age):
    """Look up the team a (name, position, age) player is currently rostered on.

    Trailing '*' / '#' handedness markers are stripped before matching.
    Returns '' when no roster entry is found.
    NOTE(review): queries are built by string interpolation — unsafe if
    player names ever contain quotes; parameterized queries would be safer.
    """
    if player_name[len(player_name)-1:] in ("*", '#'):
        search_name = player_name[:len(player_name)-1]
    else:
        search_name = player_name

    curr_team_q = """SELECT team_abb
    FROM current_rosters
    JOIN teams USING (team_id, year)
    WHERE player_name = '%s'
    AND position = '%s'
    AND age = %s
    """
    curr_team_qry = curr_team_q % (search_name, position, age)
    curr_team_query = db.query(curr_team_qry)
    # NOTE(review): this checks the query *string* rather than the result
    # tuple; it looks like `curr_team_query` was intended — verify.
    if curr_team_qry != ():
        curr_team = curr_team_query[0][0]
    else:
        curr_team = ''
    return curr_team


def multi_hitters(year):
    """For each hitter with a blank (multi-team total) row, compute the
    current-team partial line as (season totals - previous-team sums) and
    upsert it into register_batting_primary."""
    entries = []
    players_list_q = """SELECT year, player_name
    FROM register_batting_primary
    WHERE year = %s
    AND team_abb = ''
    """
    player_list_qry = players_list_q % year
    players_list = db.query(player_list_qry)

    for row in players_list:
        year, player_name = row

        # Season totals for the player (the team_abb = '' aggregate row).
        totals_q = """SELECT player_name, position, age, pa, ab, (h-2b-3b-hr) as 1b, 2b, 3b, hr, r, rbi, bb, k, hbp, sb, cs
        FROM register_batting_primary
        JOIN register_batting_secondary USING (year, player_name, team_abb, position, age)
        JOIN register_batting_analytical USING (year, player_name, team_abb, position, age)
        WHERE year = %s
        AND player_name = '%s'
        AND team_abb = '';
        """
        totals_qry = totals_q % (year, player_name)
        totals_query = db.query(totals_qry)

        if totals_query != ():
            totals = totals_query[0]
            player_name, position, age, tot_pa, tot_ab, tot_1b, tot_2b, tot_3b, tot_hr, tot_r, tot_rbi, tot_bb, tot_k, tot_hbp, tot_sb, tot_cs = totals

            curr_team = get_current_team(player_name, position, age)

            entry = {}
            entry['year'] = year
            entry['team_abb'] = curr_team
            entry['player_name'] = player_name
            entry['position'] = position
            entry['age'] = age

            # Sum of stats accumulated on all *previous* teams this season
            # (excludes both the aggregate row and the current team).
            prev_teams_q = """SELECT position, age, SUM(pa), SUM(ab), (SUM(h)-SUM(2b)-SUM(3b)-SUM(hr)) as 1b, SUM(2b), SUM(3b), SUM(hr), SUM(r), SUM(rbi), SUM(bb), SUM(k), SUM(hbp), SUM(sb), SUM(cs)
            FROM register_batting_primary
            JOIN register_batting_secondary USING (year, player_name, team_abb, position, age)
            JOIN register_batting_analytical USING (year, player_name, team_abb, position, age)
            WHERE year = %s
            AND player_name = '%s'
            AND team_abb not in ('', '%s');
            """
            prev_teams_qry = prev_teams_q % (year, player_name, curr_team)
            prev_teams_query = db.query(prev_teams_qry)
            # An all-NULL aggregate row means "no previous teams" — use zeros.
            if prev_teams_query == ((None, None, None, None, None, None, None, None, None, None, None, None, None, None, None),):
                prev_teams = ['','',0,0,0,0,0,0,0,0,0,0,0,0,0]
            elif prev_teams_query != ():
                prev_teams = prev_teams_query[0]

            # NOTE(review): if prev_teams_query were (), prev_teams would be
            # unbound here — presumably an aggregate query always returns a
            # row; confirm.
            prev_pos, prev_age, prev_pa, prev_ab, prev_1b, prev_2b, prev_3b, prev_hr, prev_r, prev_rbi, prev_bb, prev_k, prev_hbp, prev_sb, prev_cs = prev_teams

            # Current-team partial = season total minus previous teams.
            curr_pa = tot_pa - prev_pa
            curr_1b = tot_1b - prev_1b
            curr_2b = tot_2b - prev_2b
            curr_3b = tot_3b - prev_3b
            curr_hr = tot_hr - prev_hr
            curr_r = tot_r - prev_r
            curr_rbi = tot_rbi - prev_rbi
            curr_bb = tot_bb - prev_bb
            curr_k = tot_k - prev_k
            curr_hbp = tot_hbp - prev_hbp
            curr_sb = tot_sb - prev_sb
            curr_cs = tot_cs - prev_cs
            curr_ab = tot_ab - prev_ab
            curr_h = curr_1b + curr_2b + curr_3b + curr_hr

            # Rate stats recomputed from the partial counting stats.
            avg = (float(curr_h)/float(curr_ab))
            obp = (float(curr_h)+float(curr_bb)+float(curr_hbp))/float(curr_pa)
            slg = (float(curr_1b)+2*float(curr_2b)+3*float(curr_3b)+4*float(curr_hr))/float(curr_ab)

            entry['avg'] = avg
            entry['obp'] = obp
            entry['slg'] = slg
            entry['ab'] = curr_ab
            entry['h'] = curr_h
            entry['2b'] = curr_2b
            entry['3b'] = curr_3b
            entry['hr'] = curr_hr
            entry['r'] = curr_r
            entry['rbi'] = curr_rbi
            entry['hbp'] = curr_hbp
            entry['bb'] = curr_bb
            entry['k'] = curr_k
            entry['sb'] = curr_sb
            entry['cs'] = curr_cs

            entries.append(entry)

    print entries
    if entries != []:
        db.insertRowDict(entries, 'register_batting_primary', replace=True, insertMany=True, rid=0)
        db.conn.commit()


def multi_pitchers(year):
    """Same as multi_hitters, but for register_pitching_primary rows.

    Innings pitched are converted from the x.1/x.2 notation to thirds via
    ROUND(ip) + 10*(ip - ROUND(ip))/3 before summing/subtracting.
    """
    entries = []
    players_list_q = """SELECT year, player_name
    FROM register_pitching_primary
    WHERE year = %s
    AND team_abb = ''
    """
    player_list_qry = players_list_q % year
    players_list = db.query(player_list_qry)

    for row in players_list:
        year, player_name = row

        # Season totals (the team_abb = '' aggregate row).
        totals_q = """SELECT player_name, position, age, w, l, sv, g, gs, cg, sho, SUM(ROUND(ip) + (10 * (ip - ROUND(ip)) / 3)) AS ip, h, r, er, bb, k, hr, gdp
        FROM register_pitching_primary
        WHERE year = %s
        AND player_name = '%s'
        AND team_abb = '';
        """
        totals_qry = totals_q % (year, player_name)
        totals_query = db.query(totals_qry)

        if totals_query != ():
            totals = totals_query[0]
            player_name, position, age, tot_w, tot_l, tot_sv, tot_g, tot_gs, tot_cg, tot_sho, tot_ip, tot_h, tot_r, tot_er, tot_bb, tot_k, tot_hr, tot_gdp = totals

            curr_team = get_current_team(player_name, position, age)

            entry = {}
            entry['year'] = year
            entry['team_abb'] = curr_team
            entry['player_name'] = player_name
            entry['position'] = position
            entry['age'] = age

            # Stats accumulated on previous teams this season.
            prev_teams_q = """SELECT player_name, position, age, w, l, sv, g, gs, cg, sho, SUM(ROUND(ip) + (10 * (ip - ROUND(ip)) / 3)) AS ip, h, r, er, bb, k, hr, gdp
            FROM register_pitching_primary
            WHERE year = %s
            AND player_name = '%s'
            AND team_abb not in ('', '%s');
            """
            prev_teams_qry = prev_teams_q % (year, player_name, curr_team)
            prev_teams_query = db.query(prev_teams_qry)
            # An all-NULL aggregate row means "no previous teams".
            if prev_teams_query == ((None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None),):
                prev_teams = ['','','',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
            elif prev_teams_query != ():
                prev_teams = prev_teams_query[0]

            prev_name, prev_pos, prev_age, prev_w, prev_l, prev_sv, prev_g, prev_gs, prev_cg, prev_sho, prev_ip, prev_h, prev_r, prev_er, prev_bb, prev_k, prev_hr, prev_gdp = prev_teams

            # Current-team partial = season total minus previous teams.
            curr_w = tot_w - prev_w
            curr_l = tot_l - prev_l
            curr_sv = tot_sv - prev_sv
            curr_g = tot_g - prev_g
            curr_gs = tot_gs - prev_gs
            curr_cg = tot_cg - prev_cg
            curr_sho = tot_sho - prev_sho
            curr_ip = tot_ip - prev_ip
            curr_h = tot_h - prev_h
            curr_r = tot_r - prev_r
            curr_er = tot_er - prev_er
            curr_bb = tot_bb - prev_bb
            curr_k = tot_k - prev_k
            curr_hr = tot_hr - prev_hr
            curr_gdp = tot_gdp - prev_gdp

            # ERA recomputed from the partial er/ip (ip is in thirds here).
            curr_era = 9*(float(curr_er)/float(curr_ip))

            entry['era'] = curr_era
            entry['w'] = curr_w
            entry['l'] = curr_l
            entry['sv'] = curr_sv
            entry['g'] = curr_g
            entry['gs'] = curr_gs
            entry['cg'] = curr_cg
            entry['sho'] = curr_sho
            entry['ip'] = curr_ip
            entry['h'] = curr_h
            entry['r'] = curr_r
            entry['er'] = curr_er
            entry['bb'] = curr_bb
            entry['k'] = curr_k
            entry['hr'] = curr_hr
            entry['gdp'] = curr_gdp

            entries.append(entry)

    print entries
    if entries != []:
        db.insertRowDict(entries, 'register_pitching_primary', replace=True, insertMany=True, rid=0)
        db.conn.commit()

    # # used for debugging
    # for e in entries:
    #     print e
    #     raw_input("")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--year',default=2017)
    args = parser.parse_args()

    process(args.year)
{ "content_hash": "c12a6e63b3361889a52bab5e97a20069", "timestamp": "", "source": "github", "line_count": 296, "max_line_length": 185, "avg_line_length": 29.679054054054053, "alnum_prop": 0.5447922595332954, "repo_name": "Connor-R/NSBL", "id": "db05d46de0c750b22013f5a9aca53756ab87c95a", "size": "8785", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "failure_scripts/NSBL_processed_multi_team.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1058" }, { "name": "HTML", "bytes": "24557389" }, { "name": "Python", "bytes": "566653" }, { "name": "Shell", "bytes": "12883" } ], "symlink_target": "" }
# Smoke test: loads a model that uses a custom op from an external shared
# library and checks the op behaves as an identity on a random input.
import os
import sys

import numpy as np

# Expose available (onnx::* and protobuf::*) symbols from onnxruntime to resolve references in
# the custom ops shared library. Deepbind flag is required to avoid conflicts with other
# instances of onnx/protobuf libraries.
import onnxruntime

# Restore dlopen flags.
import orttraining_external_custom_ops

so = onnxruntime.SessionOptions()
sess = onnxruntime.InferenceSession("testdata/model.onnx", so)
# NOTE: `input` shadows the builtin; harmless in this short script.
input = np.random.rand(2, 2).astype(np.float32)
output = sess.run(None, {"input1": input})[0]
# The custom op is expected to pass the tensor through unchanged.
np.testing.assert_equal(input, output)
{ "content_hash": "c7889f9eded05f93a7a05639ba7cb99b", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 94, "avg_line_length": 34.05882352941177, "alnum_prop": 0.7754749568221071, "repo_name": "microsoft/onnxruntime", "id": "7d3e4edf48bd825e4be956148016b5342ffeb3c0", "size": "653", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "orttraining/orttraining/test/external_custom_ops/test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "1763425" }, { "name": "Batchfile", "bytes": "17040" }, { "name": "C", "bytes": "955390" }, { "name": "C#", "bytes": "2304597" }, { "name": "C++", "bytes": "39435305" }, { "name": "CMake", "bytes": "514764" }, { "name": "CSS", "bytes": "138431" }, { "name": "Cuda", "bytes": "1104338" }, { "name": "Dockerfile", "bytes": "8089" }, { "name": "HLSL", "bytes": "11234" }, { "name": "HTML", "bytes": "5933" }, { "name": "Java", "bytes": "418665" }, { "name": "JavaScript", "bytes": "212575" }, { "name": "Jupyter Notebook", "bytes": "218327" }, { "name": "Kotlin", "bytes": "4653" }, { "name": "Liquid", "bytes": "5457" }, { "name": "NASL", "bytes": "2628" }, { "name": "Objective-C", "bytes": "151027" }, { "name": "Objective-C++", "bytes": "107084" }, { "name": "Pascal", "bytes": "9597" }, { "name": "PowerShell", "bytes": "16419" }, { "name": "Python", "bytes": "5041661" }, { "name": "Roff", "bytes": "27539" }, { "name": "Ruby", "bytes": "3545" }, { "name": "Shell", "bytes": "116513" }, { "name": "Swift", "bytes": "115" }, { "name": "TypeScript", "bytes": "973087" } ], "symlink_target": "" }
from burlap import ServiceSatchel
from burlap.constants import *
from burlap.decorators import task


class GPSDSatchel(ServiceSatchel):
    """Burlap satchel that installs and manages the gpsd GPS daemon
    (service lifecycle commands plus a manual launch task)."""

    name = 'gpsd'

    @property
    def packager_system_packages(self):
        # OS packages required for gpsd plus its client tools and NTP.
        return {
            UBUNTU: ['gpsd', 'gpsd-clients', 'python-gps', 'ntp'],
            DEBIAN: ['gpsd', 'gpsd-clients', 'python-gps', 'ntp'],
        }

    def set_defaults(self):
        # Shell commands used by the base ServiceSatchel for each service
        # action, keyed by OS (with a 14.04-specific override for Ubuntu).
        self.env.service_commands = {
            START:{
                UBUNTU: 'service gpsd start',
                DEBIAN: 'service gpsd start',
            },
            STOP:{
                UBUNTU: 'service gpsd stop',
                DEBIAN: 'service gpsd stop',
            },
            DISABLE:{
                UBUNTU: 'chkconfig gpsd off',
                (UBUNTU, '14.04'): 'update-rc.d -f gpsd remove',
                DEBIAN: 'update-rc.d gpsd disable',
            },
            ENABLE:{
                UBUNTU: 'chkconfig gpsd on',
                (UBUNTU, '14.04'): 'update-rc.d gpsd defaults',
                DEBIAN: 'update-rc.d gpsd enable',
            },
            RELOAD:{
                UBUNTU: 'service gpsd reload',
                DEBIAN: 'service gpsd reload',
            },
            RESTART:{
                # Sleep gives the daemon time to come back up before any
                # follow-on commands run.
                UBUNTU: 'service gpsd restart; sleep 3',
                DEBIAN: 'service gpsd restart; sleep 3',
            },
        }

    @task
    def launch(self):
        # Manually start gpsd against the first USB serial GPS device.
        self.run('gpsd /dev/ttyUSB0 -F /var/run/gpsd.sock')

    @task(precursors=['packager', 'user'])
    def configure(self):
        self.install_packages()

    configure.is_deployer = True

gpsd = GPSDSatchel()
{ "content_hash": "045febec28d29f94589e7a1e6bcd61d5", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 66, "avg_line_length": 29.545454545454547, "alnum_prop": 0.496, "repo_name": "chrisspen/burlap", "id": "e6ec2673a6d59aea2f04fa1cadf82ac0398df597", "size": "1626", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "burlap/gpsd.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "722479" }, { "name": "Shell", "bytes": "11659" } ], "symlink_target": "" }
"""Commands for managing security groups.""" import click from ...jobs import securitygroups as sg_jobs from ...jobs.exceptions import AwsError from ...jobs.exceptions import MissingKey from ...jobs.exceptions import Non200Response from ...jobs.exceptions import PermissionDenied from ...jobs.exceptions import ResourceAlreadyExists from ...jobs.exceptions import ResourceDoesNotExist from ...jobs.exceptions import ResourceNotCreated from ...jobs.exceptions import ResourceNotDeleted from .. import utils @click.group() def securitygroups(): """Manage security groups.""" pass @securitygroups.command(name="list") @click.option( "--profile", help="An AWS profile to connect with.") @click.option( "--access-key-id", help="An AWS access key ID.") @click.option( "--access-key-secret", help="An AWS access key secret.") def list_security_groups( profile=None, access_key_id=None, access_key_secret=None): """List security groups.""" aws_profile = utils.get_profile(profile, access_key_id, access_key_secret) try: security_groups = sg_jobs.fetch_all(aws_profile) except PermissionDenied: msg = "You don't have permission to view security groups." 
raise click.ClickException(msg) except (MissingKey, Non200Response) as error: raise click.ClickException(str(error)) except AwsError as error: raise click.ClickException(str(error)) if security_groups: for security_group in security_groups: display_name = sg_jobs.get_display_name(security_group) click.echo(display_name) @securitygroups.command(name="create") @click.argument("name") @click.option( "--vpc", help="A VPC name (or ID).") @click.option( "--tag", multiple=True, help="KEY:VALUE tag for the security group.") @click.option( "--profile", help="An AWS profile to connect with.") @click.option( "--access-key-id", help="An AWS access key ID.") @click.option( "--access-key-secret", help="An AWS access key secret.") def create_security_group( name, vpc=None, tag=None, profile=None, access_key_id=None, access_key_secret=None): """Create security groups.""" aws_profile = utils.get_profile(profile, access_key_id, access_key_secret) tags = None if tag: tags = utils.parse_tags(tag) try: security_groups = sg_jobs.create(aws_profile, name, vpc, tags) except PermissionDenied: msg = "You don't have permission to create security groups." 
raise click.ClickException(msg) except (MissingKey, Non200Response) as error: raise click.ClickException(str(error)) except AwsError as error: raise click.ClickException(str(error)) except (ResourceDoesNotExist, ResourceAlreadyExists, ResourceNotCreated) as error: raise click.ClickException(str(error)) if security_groups: for security_group in security_groups: display_name = sg_jobs.get_display_name(security_group) click.echo(display_name) @securitygroups.command(name="delete") @click.argument("name") @click.option( "--profile", help="An AWS profile to connect with.") @click.option( "--access-key-id", help="An AWS access key ID.") @click.option( "--access-key-secret", help="An AWS access key secret.") def delete_security_group( name, profile=None, access_key_id=None, access_key_secret=None): """Delete security groups.""" aws_profile = utils.get_profile(profile, access_key_id, access_key_secret) try: security_groups = sg_jobs.delete(aws_profile, name) except PermissionDenied: msg = "You don't have permission to delete security groups." raise click.ClickException(msg) except (MissingKey, Non200Response) as error: raise click.ClickException(str(error)) except AwsError as error: raise click.ClickException(str(error)) except (ResourceDoesNotExist, ResourceNotDeleted) as error: raise click.ClickException(str(error))
{ "content_hash": "3a2d86a055a2d73197695d942d07e115", "timestamp": "", "source": "github", "line_count": 137, "max_line_length": 86, "avg_line_length": 30.284671532846716, "alnum_prop": 0.6693179079296216, "repo_name": "jtpaasch/armyguys", "id": "f95d6d2f7431ff7d7fda23ee3d6f07f472a850de", "size": "4174", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "armyguys/cli/commands/securitygroups.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "334826" }, { "name": "Shell", "bytes": "3654" } ], "symlink_target": "" }
import os
BASE_DIR = os.path.dirname(__file__)

import dj_database_url
from project_runpy import env


# SECURITY WARNING: keep the secret key used in production secret!
# (env.get reads from the environment; 'Rotom' is only a dev fallback.)
SECRET_KEY = env.get('SECRET_KEY', 'Rotom')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.get('DEBUG', False)

ALLOWED_HOSTS = ['*']


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'taggit',
    'tasteinsight.apps.backend',
    # support
    'django_extensions',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'tasteinsight.urls'

WSGI_APPLICATION = 'tasteinsight.wsgi.application'


# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# DATABASE_URL env var wins; falls back to a local SQLite file.

DATABASES = {'default': dj_database_url.config(default='sqlite:///tasteinsight.db')}

# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = False

USE_L10N = False

USE_TZ = True

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
            ],
            # Template debug mirrors the global DEBUG flag.
            'debug': DEBUG,
        },
    },
]


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

SITE_ID = 1


LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'root': {
        # Root log level comes from the LOGGING_LEVEL env var.
        'level': os.environ.get('LOGGING_LEVEL', 'WARNING'),
        'handlers': ['console'],
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
        'readable_sql': {
            '()': 'project_runpy.ReadableSqlFilter',
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'project_runpy.ColorizingStreamHandler',
        },
    },
    'loggers': {
        # SQL echo only in DEBUG and only when the SQL env var is set.
        'django.db.backends': {
            'level': 'DEBUG' if env.get('SQL', False) else 'INFO',
            'handlers': ['console'],
            'filters': ['require_debug_true', 'readable_sql'],
            'propagate': False,
        },
        'factory': {
            'level': 'ERROR',
            'propagate': False,
        },
    },
}
{ "content_hash": "02bfdbb54b629b83aac720578723a5e2", "timestamp": "", "source": "github", "line_count": 130, "max_line_length": 84, "avg_line_length": 24.884615384615383, "alnum_prop": 0.6077279752704792, "repo_name": "crccheck/tasteinsight", "id": "747ffa7ebf1034a318cbefdca1791fac836af8af", "size": "3307", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tasteinsight/settings.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CoffeeScript", "bytes": "2035" }, { "name": "HTML", "bytes": "670" }, { "name": "Makefile", "bytes": "476" }, { "name": "Python", "bytes": "10056" } ], "symlink_target": "" }
"""Process discovery (psutil-based) and shell-command execution helpers."""
from logging import debug
import os
from subprocess import CalledProcessError, PIPE, Popen
import sys
import logging
import subprocess

logger = logging.getLogger(__name__)

# NOTE(review): find_process() depends on psutil, but its import is commented
# out -- calling find_process() as shipped raises NameError.  Left untouched
# here; confirm whether psutil is an intended optional dependency.
# import psutil


class ProcessList(object):
    """Thin wrapper around a list of psutil ``Process`` objects."""

    def __init__(self, processes):
        """
        @param processes Process[]
        """
        self.processes = processes

    def exist(self):
        """Return True when at least one process was found."""
        return len(self.processes) > 0

    def listens(self, ip, port):
        """Return True if any wrapped process holds a LISTEN socket bound to
        exactly ``(ip, port)``.

        NOTE(review): uses the old psutil API (``get_connections`` /
        ``local_address``); newer psutil exposes ``connections()`` /
        ``laddr`` -- confirm the pinned psutil version.
        """
        for proc in self.processes:
            for conn in proc.get_connections():
                if conn.status == 'LISTEN':
                    bound_ip, bound_port = conn.local_address
                    if bound_ip == ip and port == bound_port:
                        return True
        return False


def find_process(name=None, name_like=None, cmd_like=None, pid=None, username=None):
    """Find running processes matching one criterion.

    Criteria are considered in priority order -- name, pid, username,
    name_like, cmd_like -- and only the first one supplied is used.

    :param name: exact process name
    :param name_like: substring of the process name
    :param cmd_like: substring of the joined command line
    :param pid: exact process id
    :param username: exact owning user name
    :return: pywizard.utils.process.ProcessList, or None when no criterion
        is given
    """
    if not (name or name_like or cmd_like or pid or username):
        return None

    # One small predicate per criterion; `match` is bound to the first
    # criterion that was actually supplied.
    def match_by_name(p):
        return p.name == name

    def match_by_pid(p):
        return p.pid == pid

    def match_by_username(p):
        return p.username == username

    def match_by_name_like(p):
        return name_like in p.name

    def match_by_cmd_like(p):
        return cmd_like in ' '.join(p.cmdline)

    if name:
        match = match_by_name
    elif pid:
        match = match_by_pid
    elif username:
        match = match_by_username
    elif name_like:
        match = match_by_name_like
    elif cmd_like:
        match = match_by_cmd_like
    else:
        match = match_by_name

    return ProcessList([p for p in psutil.process_iter() if match(p)])


def filter_secrets(text, secrets):
    """Return *text* with every occurrence of each secret masked.

    Used to scrub credentials out of log lines and error messages.
    """
    for secret in secrets:
        text = text.replace(secret, '***SECRET***')
    return text


def process_run(command, ignore_errors=False, log_output=True, secrets=None):
    """Run *command* in a subprocess and return its stripped combined output.

    Command is not safe. DO NOT use commands that are composed from
    untrusted sources to execute processes.

    :param command: argv list, or a single string (run through the shell)
    :param ignore_errors: when True, a non-zero exit status does not raise
    :param log_output: when True, each output line is logged at INFO level
    :param secrets: strings to mask in logs and error messages
    :return: decoded stdout+stderr of the process, stripped
    """
    # Fix: a mutable default (`secrets=[]`) would be shared across calls.
    secrets = [] if secrets is None else secrets

    # Fix: filter_secrets() needs a string -- the original passed `command`
    # directly, which crashed with AttributeError for list commands whenever
    # `secrets` was non-empty.  str() preserves the previous log text exactly
    # ('%s' % a_list prints str(a_list)).
    printable_command = filter_secrets(str(command), secrets)
    debug('Shell command # %s' % printable_command)

    proc = subprocess.Popen(
        command,
        shell=isinstance(command, str),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    # Stream output line by line so long-running commands log as they go;
    # readline() returns b'' only at EOF.
    out = ''
    for raw_line in iter(proc.stdout.readline, b''):
        line = raw_line.decode('ascii')
        if log_output:
            logging.info(line)
        out += line

    proc.wait()
    # Note: the original also caught CalledProcessError, but Popen/wait never
    # raise it (only check_call/check_output/run do), so that handler was dead
    # code and has been removed.
    if not ignore_errors and proc.returncode != 0:
        raise Exception('Script is failed (return code: %d). Command: %s'
                        % (proc.returncode, printable_command))

    return out.strip()
{ "content_hash": "3a7fb696d6fc8736f37176ab89d73a4c", "timestamp": "", "source": "github", "line_count": 117, "max_line_length": 132, "avg_line_length": 25.102564102564102, "alnum_prop": 0.5757575757575758, "repo_name": "pywizard/pywizard", "id": "8c09158270792db1bcfdf66eaa8869bed97a6cc3", "size": "2937", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pywizard/process.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "93736" }, { "name": "Shell", "bytes": "5842" } ], "symlink_target": "" }
# Test suite for the Swiss-system tournament module (Python 2).  The tests
# are numbered and run in order by the __main__ block; they share one
# database, so each test clears matches and/or players before exercising its
# own scenario.  Every test prints a confirmation line on success and raises
# TypeError/ValueError on failure.
from tournament import *


def testDeleteMatches():
    # deleteMatches() must succeed even when the table is already empty.
    deleteMatches()
    print "1. Old matches can be deleted."


def testDelete():
    # Matches are removed before players (matches reference players).
    deleteMatches()
    deletePlayers()
    print "2. Player records can be deleted."


def testCount():
    deleteMatches()
    deletePlayers()
    c = countPlayers()
    # Guard against implementations that return the count as a string.
    if c == '0':
        raise TypeError(
            "countPlayers() should return numeric zero, not string '0'.")
    if c != 0:
        raise ValueError("After deleting, countPlayers should return zero.")
    print "3. After deleting, countPlayers() returns zero."


def testRegister():
    deleteMatches()
    deletePlayers()
    registerPlayer("Chandra Nalaar")
    c = countPlayers()
    if c != 1:
        raise ValueError(
            "After one player registers, countPlayers() should be 1.")
    print "4. After registering a player, countPlayers() returns 1."


def testRegisterCountDelete():
    # Register four players, verify the count, then delete and re-verify.
    deleteMatches()
    deletePlayers()
    registerPlayer("Markov Chaney")
    registerPlayer("Joe Malik")
    registerPlayer("Mao Tsu-hsi")
    registerPlayer("Atlanta Hope")
    c = countPlayers()
    if c != 4:
        raise ValueError(
            "After registering four players, countPlayers should be 4.")
    deletePlayers()
    c = countPlayers()
    if c != 0:
        raise ValueError("After deleting, countPlayers should return zero.")
    print "5. Players can be registered and deleted."


def testStandingsBeforeMatches():
    # Standings rows are (id, name, wins, matches); all counters must be zero
    # before any match is reported.
    deleteMatches()
    deletePlayers()
    registerPlayer("Melpomene Murray")
    registerPlayer("Randy Schwartz")
    standings = playerStandings()
    if len(standings) < 2:
        raise ValueError("Players should appear in playerStandings even before "
                         "they have played any matches.")
    elif len(standings) > 2:
        raise ValueError("Only registered players should appear in standings.")
    if len(standings[0]) != 4:
        raise ValueError("Each playerStandings row should have four columns.")
    [(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings
    if matches1 != 0 or matches2 != 0 or wins1 != 0 or wins2 != 0:
        raise ValueError(
            "Newly registered players should have no matches or wins.")
    if set([name1, name2]) != set(["Melpomene Murray", "Randy Schwartz"]):
        raise ValueError("Registered players' names should appear in standings, "
                         "even if they have no matches played.")
    print "6. Newly registered players appear in the standings with no matches."


def testReportMatches():
    deleteMatches()
    deletePlayers()
    registerPlayer("Bruno Walton")
    registerPlayer("Boots O'Neal")
    registerPlayer("Cathy Burton")
    registerPlayer("Diane Grant")
    standings = playerStandings()
    [id1, id2, id3, id4] = [row[0] for row in standings]
    # reportMatch(winner, loser)
    reportMatch(id1, id2)
    reportMatch(id3, id4)
    standings = playerStandings()
    for (i, n, w, m) in standings:
        if m != 1:
            raise ValueError("Each player should have one match recorded.")
        if i in (id1, id3) and w != 1:
            raise ValueError("Each match winner should have one win recorded.")
        elif i in (id2, id4) and w != 0:
            raise ValueError("Each match loser should have zero wins recorded.")
    print "7. After a match, players have updated standings."


def testPairings():
    # After one round, the two winners should be paired together and likewise
    # the two losers.
    deleteMatches()
    deletePlayers()
    registerPlayer("Twilight Sparkle")
    registerPlayer("Fluttershy")
    registerPlayer("Applejack")
    registerPlayer("Pinkie Pie")
    standings = playerStandings()
    [id1, id2, id3, id4] = [row[0] for row in standings]
    reportMatch(id1, id2)
    reportMatch(id3, id4)
    pairings = swissPairings()
    if len(pairings) != 2:
        raise ValueError(
            "For four players, swissPairings should return two pairs.")
    [(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4)] = pairings
    # frozensets make the comparison order-insensitive within each pair.
    correct_pairs = set([frozenset([id1, id3]), frozenset([id2, id4])])
    actual_pairs = set([frozenset([pid1, pid2]), frozenset([pid3, pid4])])
    if correct_pairs != actual_pairs:
        raise ValueError(
            "After one match, players with one win should be paired.")
    print "8. After one match, players with one win are paired."


def testPairingsWithHistory():
    # Six players with uneven records: the pairing algorithm should match
    # players on win count while avoiding rematches of players who already
    # played each other.
    deleteMatches()
    deletePlayers()
    registerPlayer("Twilight Sparkle")
    registerPlayer("Fluttershy")
    registerPlayer("Applejack")
    registerPlayer("Pinkie Pie")
    registerPlayer("rick")
    registerPlayer("morty")
    standings = playerStandings()
    [id1, id2, id3, id4, id5, id6] = [row[0] for row in standings]
    reportMatch(id1, id2)
    reportMatch(id3, id4)
    reportMatch(id2, id6)
    reportMatch(id5, id6)
    reportMatch(id3, id2)
    reportMatch(id1, id2)
    pairings = swissPairings()
    if len(pairings) != 3:
        raise ValueError(
            "For four players, swissPairings should return three pairs.")
    [(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4),
     (pid5, pname5, pid6, pname6)] = pairings
    correct_pairs = set([frozenset([id1, id3]), frozenset([id2, id5]),
                         frozenset([id4, id6])])
    actual_pairs = set([frozenset([pid1, pid2]), frozenset([pid3, pid4]),
                        frozenset([pid5, pid6])])
    if correct_pairs != actual_pairs:
        raise ValueError(
            "After matches, players with most wins should be paired, players previously matched should not be matched again")
    print "9. After matches, players with most wins are paired, players previously matched are not matched"


if __name__ == '__main__':
    # Run the whole suite in order; any failure raises and aborts the run.
    testDeleteMatches()
    testDelete()
    testCount()
    testRegister()
    testRegisterCountDelete()
    testStandingsBeforeMatches()
    testReportMatches()
    testPairings()
    testPairingsWithHistory()
    print "Success! All tests pass!"
{ "content_hash": "bb4114fa3244e905bfe238b2dd175ceb", "timestamp": "", "source": "github", "line_count": 165, "max_line_length": 125, "avg_line_length": 35.29090909090909, "alnum_prop": 0.6532715095311695, "repo_name": "barca/Nano_Tournament", "id": "f2cab3e29e74406b7378e2b51e3049289d121f7a", "size": "5879", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tournament_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "11621" } ], "symlink_target": "" }
""" Models """ import numpy as np import pandas as pd from sqlalchemy import sql, create_engine, MetaData, Table, Column, Integer, VARCHAR, TIMESTAMP from sklearn.metrics import jaccard_similarity_score class DB: def __init__(self): from instance.config import PG_USER, PG_PASSWORD # connection db_uri = "postgresql://" + PG_USER + ":" + PG_PASSWORD + "@localhost/shrecdb" engine = create_engine(db_uri) self.connection = engine.connect() # schema metadata = MetaData() self.books_schema = Table("books", metadata, Column("book_id", Integer, nullable=False, primary_key=True), Column("title", VARCHAR)) self.ratings_schema = Table("ratings", metadata, Column("user_id", Integer, nullable=False, primary_key=True), Column("rated_at", TIMESTAMP), Column("rat_book_01", Integer), Column("rat_book_02", Integer), Column("rat_book_03", Integer), Column("rat_book_04", Integer), Column("rat_book_05", Integer), Column("rat_book_06", Integer), Column("rat_book_07", Integer), Column("rat_book_08", Integer), Column("rat_book_09", Integer), Column("rat_book_10", Integer), Column("rat_book_11", Integer), Column("rat_book_12", Integer), Column("rat_book_13", Integer), Column("rat_book_14", Integer), Column("rat_book_15", Integer),) def get_books(self): """ Get books data from DB :return: Pandas Dataframe {book_id, title} """ # DB query query_books = "SELECT * FROM books" books = self.connection.execute(sql.text(query_books)) self.connection.close() # Data processing book_ids = []; titles = [] for book in books: book_ids.append(book[0]) titles.append(book[1]) # Dataframe books = {"BOOK_ID": book_ids, "TITLE": titles} return pd.DataFrame(books) def get_ratings(self): """ Get ratings data from DB :return: Pandas Dataframe {user_id, rated_at, ratings_per_book[1:15} """ # DB query query_ratings = "SELECT * FROM ratings" all_ratings = self.connection.execute(sql.text(query_ratings)) self.connection.close() # Data processing user_ids = []; ts = []; br01 = []; br02 = []; br03 = []; br04 = []; br05 
= []; br06 = []; br07 = []; br08 = []; br09 = []; br10 = []; br11 = []; br12 = []; br13 = []; br14 = []; br15 = [] for rating in all_ratings: user_ids.append(rating[0]) ts.append(rating[1]) br01.append(rating[2]); br02.append(rating[3]); br03.append(rating[4]); br04.append(rating[5]); br05.append(rating[6]); br06.append(rating[7]); br07.append(rating[8]);br08.append(rating[9]); br09.append(rating[10]); br10.append(rating[11]); br11.append(rating[12]); br12.append(rating[13]); br13.append(rating[14]); br14.append(rating[15]); br15.append(rating[16]) # Dataframe ratings = {"USER_ID": user_ids, "RATED_AT": ts, "RAT_B01": br01, "RAT_B02": br02, "RAT_B03": br03, "RAT_B04": br04, "RAT_B05": br05, "RAT_B06": br06, "RAT_B07": br07, "RAT_B08": br08, "RAT_B09": br09, "RAT_B10": br10, "RAT_B11": br11, "RAT_B12": br12, "RAT_B13": br13, "RAT_B14": br14, "RAT_B15": br15} return pd.DataFrame(ratings) def insert_user_rating(self, user_rating): """ Insert new rating from user into DB :param user_rating: list of ratings, sorted from book01 to book15 :return: confirmation string """ insert = self.ratings_schema.insert().values(rat_book_01 = user_rating[0], rat_book_02 = user_rating[1], rat_book_03 = user_rating[2], rat_book_04 = user_rating[3], rat_book_05 = user_rating[4], rat_book_06 = user_rating[5], rat_book_07 = user_rating[6], rat_book_08 = user_rating[7], rat_book_09 = user_rating[8], rat_book_10 = user_rating[9], rat_book_11 = user_rating[10], rat_book_12 = user_rating[11], rat_book_13 = user_rating[12], rat_book_14 = user_rating[13], rat_book_15 = user_rating[14]) self.connection.execute(insert) self.connection.close() return "Rating inserted: " + str(user_rating) class RecommendationEngine: def __init__(self, user_rating, collaborative_data): self.user_rating = user_rating self.collaborative_data = collaborative_data def recommend_books(self): """ Recommend books wtih user input based on collaborative dataset :return: sorted list of recommended books tuples (book_id, 
recommendation_score) """ def all_books_recommendation(user_rating=self.user_rating, ratings_data=self.collaborative_data, method=jaccard_similarity_score): """ Recommendation engine based on collaborative filtering. Creates recommendation array summing up weighted similarity scores by book, divided by sum of user similarities per rating. :param user_rating: list of user inputs as int. Size 15, nan replaced with 0 :param ratings_data: dataframe with user ratings for the 15 books :param method: similarity measurement method. Jaccard similarity (default) :return: array of recommendation ratings for all 15 books """ ratings_matrix = ratings_data.ix[: , "RAT_B01":"RAT_B15"].as_matrix().astype(float) weighted_ratings = np.array([]); user_similarities = np.array([]) for row in ratings_matrix: similarity_coefficient = method(user_rating, row) weighted_row = row * similarity_coefficient row[row != 0.] = similarity_coefficient if weighted_ratings.size == 0 and user_similarities.size == 0: weighted_ratings = np.hstack((weighted_ratings, weighted_row)) user_similarities = np.hstack((user_similarities, row)) else: weighted_ratings = np.vstack((weighted_ratings, weighted_row)) user_similarities = np.vstack((user_similarities, row)) total = np.sum(weighted_ratings, axis=0) sim_sum = np.sum(user_similarities, axis=0) return total / sim_sum def filter_recommendation(recommendation, user_input=self.user_rating): """ Filter recommendation array based on input books :param recommendation: recommendation array of unsorted recommendation values :param user_input: list from user input :return: tuple ([index of recommended books],[ratings of recommended books]) """ recommend_books = [] recommend_books_ratings = [] for position, item in enumerate(user_input): if item == 0: recommend_books.append(position) for book_id in recommend_books: recommend_books_ratings.append(recommendation[book_id]) return recommend_books, recommend_books_ratings def sort_recommendation(recommended_idx, 
recommended_ratings): """ Sort recommendation based on rating values :param recommended_idx: list of book indices from recommendation :param recommended_ratings: list of ratings from recommendation :return: list of sorted recommendations - tuple of (book_id, rating) """ recommended_dict = {} for id, rating in zip(recommended_idx, recommended_ratings): recommended_dict[id] = rating return sorted(recommended_dict.items(), key=lambda x: x[1], reverse=True) recommended_books, recommended_ratings = filter_recommendation(all_books_recommendation(self.user_rating)) recommendation_for_user = sort_recommendation(recommended_books, recommended_ratings) return recommendation_for_user
{ "content_hash": "1c25579472476590d35af8e0fb776d0d", "timestamp": "", "source": "github", "line_count": 170, "max_line_length": 127, "avg_line_length": 51.076470588235296, "alnum_prop": 0.5622480709432224, "repo_name": "donK23/shed01", "id": "0e429769f9e2788c1f4ac6f7b679aac76f6715c2", "size": "8683", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "PyBeispiele/SherlockRec/app/models.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "208667" }, { "name": "CSS", "bytes": "178378" }, { "name": "HTML", "bytes": "4924144" }, { "name": "Java", "bytes": "79538" }, { "name": "JavaScript", "bytes": "12703" }, { "name": "Makefile", "bytes": "206218" }, { "name": "OpenEdge ABL", "bytes": "4056115" }, { "name": "Python", "bytes": "43411" }, { "name": "QML", "bytes": "1137748" }, { "name": "QMake", "bytes": "907" }, { "name": "R", "bytes": "9636" } ], "symlink_target": "" }
from braintree.payment_method_parser import parse_payment_method
from braintree.resource import Resource


class RevokedPaymentMethodMetadata(Resource):
    """Metadata describing a payment method whose access grant was revoked.

    Parses the raw gateway attributes into a payment-method object and
    exposes its customer id and token directly.
    """

    def __init__(self, gateway, attributes):
        payment_method = parse_payment_method(gateway, attributes)
        self.revoked_payment_method = payment_method
        self.customer_id = payment_method.customer_id
        self.token = payment_method.token
{ "content_hash": "3178428ea62d38ab3e130c3fd9bd1797", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 79, "avg_line_length": 44.44444444444444, "alnum_prop": 0.7625, "repo_name": "braintree/braintree_python", "id": "b4ed3373dd493d6d5c2309ed3e2dd9df5a06f084", "size": "400", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "braintree/revoked_payment_method_metadata.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "252" }, { "name": "Makefile", "bytes": "238" }, { "name": "Python", "bytes": "1338636" }, { "name": "Ruby", "bytes": "2099" }, { "name": "Shell", "bytes": "193" } ], "symlink_target": "" }
# Auto-generated Django migration for the board app.
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    # Runs after the migration that added Notification.checked_time.
    dependencies = [
        ('board', '0010_notification_checked_time'),
    ]

    operations = [
        # Default ordering: newest notifications first.
        migrations.AlterModelOptions(
            name='notification',
            options={'ordering': ['-created_time']},
        ),
        # Re-point to_user at the configured user model, with the reverse
        # accessor renamed to '_notifications'.
        migrations.AlterField(
            model_name='notification',
            name='to_user',
            field=models.ForeignKey(related_name='_notifications', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
{ "content_hash": "f203a27c3ec1f7a083d72ad12958f40b", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 96, "avg_line_length": 26.625, "alnum_prop": 0.6056338028169014, "repo_name": "devunt/hydrocarbon", "id": "b05983d48b7d4939d26da1f3e4ff3dd020ca1fd3", "size": "663", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "board/migrations/0011_auto_20150111_1842.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "172930" }, { "name": "HTML", "bytes": "60201" }, { "name": "JavaScript", "bytes": "33750" }, { "name": "Python", "bytes": "106570" } ], "symlink_target": "" }
# Generated Azure SDK model (msrest serialization pattern); edit with care.
from .sub_resource import SubResource


class InboundNatRule(SubResource):
    """Inbound NAT rule of the load balancer.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration:
     ~azure.mgmt.network.v2017_11_01.models.SubResource
    :ivar backend_ip_configuration: A reference to a private IP address
     defined on a network interface of a VM. Traffic sent to the frontend port
     of each of the frontend IP configurations is forwarded to the backend IP.
    :vartype backend_ip_configuration:
     ~azure.mgmt.network.v2017_11_01.models.NetworkInterfaceIPConfiguration
    :param protocol: Possible values include: 'Udp', 'Tcp', 'All'
    :type protocol: str or
     ~azure.mgmt.network.v2017_11_01.models.TransportProtocol
    :param frontend_port: The port for the external endpoint. Port numbers
     for each rule must be unique within the Load Balancer. Acceptable values
     range from 1 to 65534.
    :type frontend_port: int
    :param backend_port: The port used for the internal endpoint. Acceptable
     values range from 1 to 65535.
    :type backend_port: int
    :param idle_timeout_in_minutes: The timeout for the TCP idle connection.
     The value can be set between 4 and 30 minutes. The default value is 4
     minutes. This element is only used when the protocol is set to TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for
     the floating IP capability required to configure a SQL AlwaysOn
     Availability Group. This setting is required when using the SQL AlwaysOn
     Availability Groups in SQL server. This setting can't be changed after
     you create the endpoint.
    :type enable_floating_ip: bool
    :param provisioning_state: Gets the provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Gets name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # backend_ip_configuration is read-only: populated by the service,
    # rejected on requests.
    _validation = {
        'backend_ip_configuration': {'readonly': True},
    }

    # Serialization map: Python attribute -> wire-format key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'backend_ip_configuration': {'key': 'properties.backendIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, id: str=None, frontend_ip_configuration=None, protocol=None, frontend_port: int=None, backend_port: int=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        super(InboundNatRule, self).__init__(id=id, **kwargs)
        self.frontend_ip_configuration = frontend_ip_configuration
        # Read-only on the wire; always starts as None client-side.
        self.backend_ip_configuration = None
        self.protocol = protocol
        self.frontend_port = frontend_port
        self.backend_port = backend_port
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.enable_floating_ip = enable_floating_ip
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
{ "content_hash": "1d629183c4ac07dc37f6e544cc64ad89", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 282, "avg_line_length": 51.675, "alnum_prop": 0.6845670053217223, "repo_name": "lmazuel/azure-sdk-for-python", "id": "1024de45ababdc182c8528723d54c7e1ad5291a1", "size": "4608", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/inbound_nat_rule_py3.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "42572767" } ], "symlink_target": "" }
# Aggregates the three Munsell Renotation datasets into one case-insensitive
# mapping, MUNSELL_COLOURS, with short aliases for each dataset.
from .all import MUNSELL_COLOURS_ALL
from .experimental import MUNSELL_COLOURS_1929
from .real import MUNSELL_COLOURS_REAL

from colour.utilities import CanonicalMapping

__all__ = [
    "MUNSELL_COLOURS_ALL",
]
__all__ += [
    "MUNSELL_COLOURS_1929",
]
__all__ += [
    "MUNSELL_COLOURS_REAL",
]

MUNSELL_COLOURS = CanonicalMapping(
    {
        "Munsell Colours All": MUNSELL_COLOURS_ALL,
        "Munsell Colours 1929": MUNSELL_COLOURS_1929,
        "Munsell Colours Real": MUNSELL_COLOURS_REAL,
    }
)
# Fix: the quoted ASTM D1535-08e1 sentence below had lost the "fi" ligature
# ("ve digits" -> "five digits"); no other text changed.
MUNSELL_COLOURS.__doc__ = """
Define the *Munsell Renotation System* datasets.

-   ``Munsell Colours All``: *all* published *Munsell* colours, including the
    extrapolated colors.
-   ``Munsell Colours 1929``: the colours appearing in the 1929 *Munsell Book
    of Color*. These data has been used in the scaling experiments leading to
    the 1943 renotation.
-   ``Munsell Colours Real``: *real*, within MacAdam limits *Munsell* colours
    only. They are the colours listed in the original 1943 renotation article
    *(Newhall, Nickerson, & Judd, 1943)*.

Notes
-----
-   The Munsell Renotation data commonly available within the *all.dat*,
    *experimental.dat* and *real.dat* files features *CIE xyY* colourspace
    values that are scaled by a :math:`1 / 0.975 \\simeq 1.02568` factor. If
    you are performing conversions using *Munsell* *Colorlab* specification,
    e.g. *2.5R 9/2*, according to *ASTM D1535-08e1* method, you should not
    scale the output :math:`Y` Luminance. However, if you use directly the
    *CIE xyY* colourspace values from the Munsell Renotation data data, you
    should scale the :math:`Y` Luminance before conversions by a
    :math:`0.975` factor.

    *ASTM D1535-08e1* states that::

        The coefficients of this equation are obtained from the 1943 equation
        by multiplying each coefficient by 0.975, the reflectance factor of
        magnesium oxide with respect to the perfect reflecting diffuser, and
        rounding to five digits of precision.

-   Chromaticities assume *CIE Illuminant C*, approximately 6700K, as neutral
    origin for both the hue and chroma loci.

References
----------
-   :cite:`MunsellColorSciencec` : Munsell Color Science. (n.d.). Munsell
    Colours Data. Retrieved August 20, 2014, from
    http://www.cis.rit.edu/research/mcsl2/online/munsell.php

Aliases:

-   'all': 'Munsell Colours All'
-   '1929': 'Munsell Colours 1929'
-   'real': 'Munsell Colours Real'
"""
# Short aliases resolving to the same dataset objects.
MUNSELL_COLOURS["all"] = MUNSELL_COLOURS["Munsell Colours All"]
MUNSELL_COLOURS["1929"] = MUNSELL_COLOURS["Munsell Colours 1929"]
MUNSELL_COLOURS["real"] = MUNSELL_COLOURS["Munsell Colours Real"]

__all__ += [
    "MUNSELL_COLOURS",
]
{ "content_hash": "75a59d5508d7953d45a439141ad4fb66", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 78, "avg_line_length": 36, "alnum_prop": 0.7, "repo_name": "colour-science/colour", "id": "0ce892ad0bcfd294cf10d61357046a9989e5d8bd", "size": "2700", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "colour/notation/datasets/munsell/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "7967270" }, { "name": "TeX", "bytes": "163213" }, { "name": "Visual Basic 6.0", "bytes": "1170" } ], "symlink_target": "" }
import re

# Demonstrates the two scanning APIs on the same pattern: findall() yields
# the matched substrings, finditer() yields match objects with positions.
text = 'abbaaabbbaaaaa'
pattern = 'ab'

for match in re.findall(pattern, text):
    print('Found "%s"' % match)

for match in re.finditer(pattern, text):
    s, e = match.span()
    print('Found %s at %d to %d' % (text[s:e], s, e))
{ "content_hash": "6a3ebdc437db0a77c7739056349ed0ef", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 53, "avg_line_length": 21.75, "alnum_prop": 0.6015325670498084, "repo_name": "eroicaleo/ThePythonStandardLibraryByExample", "id": "94a8d4902b43721c2c66239be3d12f6518eed5bd", "size": "285", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ch01Text/1.3re/MultipleMatching.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "34587" } ], "symlink_target": "" }
import json import pytest import responses from twitch.client import TwitchClient from twitch.constants import BASE_URL from twitch.exceptions import TwitchAttributeException from twitch.resources import Community, User example_community = { "_id": "e9f17055-810f-4736-ba40-fba4ac541caa", "name": "DallasTesterCommunity", } example_user = { "_id": "44322889", "name": "dallas", } @responses.activate def test_get_by_name(): responses.add( responses.GET, "{}communities".format(BASE_URL), body=json.dumps(example_community), status=200, content_type="application/json", ) client = TwitchClient("client id") community = client.communities.get_by_name("spongebob") assert len(responses.calls) == 1 assert isinstance(community, Community) assert community.id == example_community["_id"] assert community.name == example_community["name"] @responses.activate def test_get_by_id(): community_id = "abcd" responses.add( responses.GET, "{}communities/{}".format(BASE_URL, community_id), body=json.dumps(example_community), status=200, content_type="application/json", ) client = TwitchClient("client id") community = client.communities.get_by_id(community_id) assert len(responses.calls) == 1 assert isinstance(community, Community) assert community.id == example_community["_id"] assert community.name == example_community["name"] @responses.activate def test_update(): community_id = "abcd" responses.add( responses.PUT, "{}communities/{}".format(BASE_URL, community_id), body=json.dumps(example_community), status=204, content_type="application/json", ) client = TwitchClient("client id") client.communities.update(community_id) assert len(responses.calls) == 1 @responses.activate def test_get_top(): response = {"_cursor": "MTA=", "_total": 100, "communities": [example_community]} responses.add( responses.GET, "{}communities/top".format(BASE_URL), body=json.dumps(response), status=200, content_type="application/json", ) client = TwitchClient("client id") communities = client.communities.get_top() 
assert len(responses.calls) == 1 assert len(communities) == 1 community = communities[0] assert isinstance(community, Community) assert community.id == example_community["_id"] assert community.name == example_community["name"] @responses.activate @pytest.mark.parametrize("param,value", [("limit", 101)]) def test_get_top_raises_if_wrong_params_are_passed_in(param, value): client = TwitchClient("client id") kwargs = {param: value} with pytest.raises(TwitchAttributeException): client.communities.get_top(**kwargs) @responses.activate def test_get_banned_users(): community_id = "abcd" response = {"_cursor": "", "banned_users": [example_user]} responses.add( responses.GET, "{}communities/{}/bans".format(BASE_URL, community_id), body=json.dumps(response), status=200, content_type="application/json", ) client = TwitchClient("client id", "oauth token") users = client.communities.get_banned_users(community_id) assert len(responses.calls) == 1 assert len(users) == 1 user = users[0] assert isinstance(user, User) assert user.id == example_user["_id"] assert user.name == example_user["name"] @responses.activate @pytest.mark.parametrize("param,value", [("limit", 101)]) def test_get_banned_users_raises_if_wrong_params_are_passed_in(param, value): client = TwitchClient("client id", "oauth token") kwargs = {param: value} with pytest.raises(TwitchAttributeException): client.communities.get_banned_users("1234", **kwargs) @responses.activate def test_ban_user(): community_id = "abcd" user_id = 1234 responses.add( responses.PUT, "{}communities/{}/bans/{}".format(BASE_URL, community_id, user_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.ban_user(community_id, user_id) assert len(responses.calls) == 1 @responses.activate def test_unban_user(): community_id = "abcd" user_id = 1234 responses.add( responses.DELETE, "{}communities/{}/bans/{}".format(BASE_URL, community_id, user_id), status=204, 
content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.unban_user(community_id, user_id) assert len(responses.calls) == 1 @responses.activate def test_create_avatar_image(): community_id = "abcd" responses.add( responses.POST, "{}communities/{}/images/avatar".format(BASE_URL, community_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.create_avatar_image(community_id, "imagecontent") assert len(responses.calls) == 1 @responses.activate def test_delete_avatar_image(): community_id = "abcd" responses.add( responses.DELETE, "{}communities/{}/images/avatar".format(BASE_URL, community_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.delete_avatar_image(community_id) assert len(responses.calls) == 1 @responses.activate def test_create_cover_image(): community_id = "abcd" responses.add( responses.POST, "{}communities/{}/images/cover".format(BASE_URL, community_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.create_cover_image(community_id, "imagecontent") assert len(responses.calls) == 1 @responses.activate def test_delete_cover_image(): community_id = "abcd" responses.add( responses.DELETE, "{}communities/{}/images/cover".format(BASE_URL, community_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.delete_cover_image(community_id) assert len(responses.calls) == 1 @responses.activate def test_get_moderators(): community_id = "abcd" response = {"moderators": [example_user]} responses.add( responses.GET, "{}communities/{}/moderators".format(BASE_URL, community_id), body=json.dumps(response), status=200, content_type="application/json", ) client = TwitchClient("client id", "oauth token") moderators = 
client.communities.get_moderators(community_id) assert len(responses.calls) == 1 assert len(moderators) == 1 user = moderators[0] assert isinstance(user, User) assert user.id == example_user["_id"] assert user.name == example_user["name"] @responses.activate def test_add_moderator(): community_id = "abcd" user_id = 12345 responses.add( responses.PUT, "{}communities/{}/moderators/{}".format(BASE_URL, community_id, user_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.add_moderator(community_id, user_id) assert len(responses.calls) == 1 @responses.activate def test_delete_moderator(): community_id = "abcd" user_id = 12345 responses.add( responses.DELETE, "{}communities/{}/moderators/{}".format(BASE_URL, community_id, user_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.delete_moderator(community_id, user_id) assert len(responses.calls) == 1 @responses.activate def test_get_permissions(): community_id = "abcd" response = {"ban": True, "timeout": True, "edit": True} responses.add( responses.GET, "{}communities/{}/permissions".format(BASE_URL, community_id), body=json.dumps(response), status=200, content_type="application/json", ) client = TwitchClient("client id", "oauth token") permissions = client.communities.get_permissions(community_id) assert len(responses.calls) == 1 assert isinstance(permissions, dict) assert permissions["ban"] is True @responses.activate def test_report_violation(): community_id = "abcd" responses.add( responses.POST, "{}communities/{}/report_channel".format(BASE_URL, community_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.report_violation(community_id, 12345) assert len(responses.calls) == 1 @responses.activate def test_get_timed_out_users(): community_id = "abcd" response = {"_cursor": "", "timed_out_users": [example_user]} 
responses.add( responses.GET, "{}communities/{}/timeouts".format(BASE_URL, community_id), body=json.dumps(response), status=200, content_type="application/json", ) client = TwitchClient("client id", "oauth token") users = client.communities.get_timed_out_users(community_id) assert len(responses.calls) == 1 assert len(users) == 1 user = users[0] assert isinstance(user, User) assert user.id == example_user["_id"] assert user.name == example_user["name"] @responses.activate @pytest.mark.parametrize("param,value", [("limit", 101)]) def test_get_timed_out_users_raises_if_wrong_params_are_passed_in(param, value): client = TwitchClient("client id", "oauth token") kwargs = {param: value} with pytest.raises(TwitchAttributeException): client.communities.get_timed_out_users("1234", **kwargs) @responses.activate def test_add_timed_out_user(): community_id = "abcd" user_id = 12345 responses.add( responses.PUT, "{}communities/{}/timeouts/{}".format(BASE_URL, community_id, user_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.add_timed_out_user(community_id, user_id, 5) assert len(responses.calls) == 1 @responses.activate def test_delete_timed_out_user(): community_id = "abcd" user_id = 12345 responses.add( responses.DELETE, "{}communities/{}/timeouts/{}".format(BASE_URL, community_id, user_id), status=204, content_type="application/json", ) client = TwitchClient("client id", "oauth token") client.communities.delete_timed_out_user(community_id, user_id) assert len(responses.calls) == 1
{ "content_hash": "d35f0307296989239414fe85a20ab3e5", "timestamp": "", "source": "github", "line_count": 419, "max_line_length": 85, "avg_line_length": 26.14558472553699, "alnum_prop": 0.6485623003194888, "repo_name": "tsifrer/python-twitch-client", "id": "8b22e4d1c248a5fd419cb4dc89a783f169ce6d7c", "size": "10955", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/api/test_communities.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "143" }, { "name": "Python", "bytes": "181480" } ], "symlink_target": "" }
from tornado import gen from tornado.web import asynchronous from base_handler import BaseHandler from elephunk.records.activity import Activity class ActivityHandler(BaseHandler): @asynchronous @gen.engine def get(self): rows = yield gen.Task(self.client_for('postgres').select_all, "SELECT * FROM pg_stat_activity", record=Activity) self.render("activity/index.html", connections=rows)
{ "content_hash": "6668fb6db2f19cd2a8358c9203d47a04", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 120, "avg_line_length": 37.90909090909091, "alnum_prop": 0.750599520383693, "repo_name": "pitluga/elephunk", "id": "cd7bce66abf03b1a2ad24050b982b67ac2b76ed8", "size": "417", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "elephunk/handlers/activity_handler.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "375" }, { "name": "JavaScript", "bytes": "2840" }, { "name": "Python", "bytes": "30243" }, { "name": "Shell", "bytes": "272" } ], "symlink_target": "" }
import couchdb import sys def create_db(db_name,couch): if db_name in couch: db = couch[db_name] else: db = couch.create(db_name) return db server_url = 'http://localhost:5984' couch = couchdb.Server(server_url) bag_name = sys.argv[1] db = create_db('bags',couch) doc = { 'bag': bag_name.split('/')[-1] } db.save(doc) db.put_attachment(doc=doc,content=open(bag_name),filename='data.bag',content_type='application/octet-stream') couch.delete('bags')
{ "content_hash": "d777f795f4abd0eba9691be5a6519dc2", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 109, "avg_line_length": 25.210526315789473, "alnum_prop": 0.6680584551148225, "repo_name": "WalkingMachine/sara_commun", "id": "0131cb06558748bdb141bc0e5611eda1fd6b4144", "size": "502", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wm_ork/object_recognition_core/test/db/couch_surfing.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CMake", "bytes": "6113" } ], "symlink_target": "" }
from __future__ import annotations

from edb.common import parsing


class PrecedenceMeta(parsing.PrecedenceMeta):
    pass


# NOTE(review): PrecedenceMeta appears to derive operator binding strength
# from the declaration order of the classes below — confirm in
# edb.common.parsing before relying on it.
class Precedence(parsing.Precedence, assoc='fail', metaclass=PrecedenceMeta):
    pass


# Set operators.
class P_UNION(Precedence, assoc='left', tokens=('UNION', 'EXCEPT',)):
    pass


class P_INTERSECT(Precedence, assoc='left', tokens=('INTERSECT',)):
    pass


# Conditional expression: IF ... ELSE ...
class P_IFELSE(Precedence, assoc='right', tokens=('IF', 'ELSE')):
    pass


# Logical operators.
class P_OR(Precedence, assoc='left', tokens=('OR',)):
    pass


class P_AND(Precedence, assoc='left', tokens=('AND',)):
    pass


class P_NOT(Precedence, assoc='right', tokens=('NOT',)):
    pass


# Comparison and membership.
class P_EQUALS(Precedence, assoc='right', tokens=('EQUALS',)):
    pass


class P_ANGBRACKET(Precedence, assoc='nonassoc',
                   tokens=('LANGBRACKET', 'RANGBRACKET')):
    pass


class P_LIKE_ILIKE(Precedence, assoc='nonassoc', tokens=('LIKE', 'ILIKE')):
    pass


class P_IN(Precedence, assoc='nonassoc', tokens=('IN',)):
    pass


class P_IDENT(Precedence, assoc='nonassoc', tokens=('IDENT', 'PARTITION')):
    pass


class P_OP(Precedence, assoc='left', tokens=('OP',)):
    pass


class P_IS(Precedence, assoc='nonassoc', tokens=('IS',)):
    pass


# Arithmetic operators.
class P_ADD_OP(Precedence, assoc='left',
               tokens=('PLUS', 'MINUS', 'DOUBLEPLUS')):
    pass


class P_MUL_OP(Precedence, assoc='left',
               tokens=('STAR', 'SLASH', 'DOUBLESLASH', 'PERCENT')):
    pass


# Coalescing operator (??) is right-associative.
class P_DOUBLEQMARK_OP(Precedence, assoc='right', tokens=('DOUBLEQMARK',)):
    pass


# Type expressions.
class P_TYPEOF(Precedence, assoc='nonassoc', tokens=('TYPEOF',)):
    pass


class P_INTROSPECT(Precedence, assoc='nonassoc', tokens=('INTROSPECT',)):
    pass


class P_TYPEOR(Precedence, assoc='left', tokens=('PIPE',)):
    pass


class P_TYPEAND(Precedence, assoc='left', tokens=('AMPER',)):
    pass


# No token: presumably referenced explicitly from grammar productions.
class P_UMINUS(Precedence, assoc='right'):
    pass


# rel_to_last='=' — appears to pin these to the previous class's level;
# confirm semantics in edb.common.parsing.
class P_EXISTS(Precedence, assoc='right', tokens=('EXISTS',), rel_to_last='='):
    pass


class P_DISTINCT(Precedence, assoc='right', tokens=('DISTINCT',),
                 rel_to_last='='):
    pass


class P_POW_OP(Precedence, assoc='right', tokens=('CIRCUMFLEX',)):
    pass


# No token: presumably referenced explicitly from grammar productions.
class P_TYPECAST(Precedence, assoc='right'):
    pass


# Grouping and access operators.
class P_BRACE(Precedence, assoc='left', tokens=('LBRACE', 'RBRACE')):
    pass


class P_BRACKET(Precedence, assoc='left', tokens=('LBRACKET', 'RBRACKET')):
    pass


class P_PAREN(Precedence, assoc='left', tokens=('LPAREN', 'RPAREN')):
    pass


class P_DOT(Precedence, assoc='left', tokens=('DOT', 'DOTBW')):
    pass


class P_DETACHED(Precedence, assoc='right', tokens=('DETACHED',)):
    pass


class P_GLOBAL(Precedence, assoc='right', tokens=('GLOBAL',)):
    pass


class P_DOUBLECOLON(Precedence, assoc='left', tokens=('DOUBLECOLON',)):
    pass


class P_AT(Precedence, assoc='left', tokens=('AT',)):
    pass
{ "content_hash": "a9edeaf8a2762265dd30acfed822009f", "timestamp": "", "source": "github", "line_count": 148, "max_line_length": 77, "avg_line_length": 19.39864864864865, "alnum_prop": 0.6384535005224661, "repo_name": "edgedb/edgedb", "id": "cc27c9361471d62c2e1bc86cc71b96788b8513f7", "size": "3552", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "edb/edgeql/parser/grammar/precedence.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Cython", "bytes": "372837" }, { "name": "JavaScript", "bytes": "7481" }, { "name": "Makefile", "bytes": "1159" }, { "name": "Python", "bytes": "9860929" }, { "name": "Rust", "bytes": "238373" } ], "symlink_target": "" }
from django.conf.urls import patterns, include, url from django.views.generic import RedirectView from django.contrib import admin admin.autodiscover() handler404 = 'djtools.views.errors.four_oh_four_error' handler500 = 'djtools.views.errors.server_error' urlpatterns = patterns('', #url(r'^admin/', include(admin.site.urls)), # my app url(r'^manager/', include('djparking.manager.urls')), # redirect url(r'^$', RedirectView.as_view(url="/forms/parking/manager/")), )
{ "content_hash": "826cb5e03cbf37d0198d5eb642562886", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 68, "avg_line_length": 28.941176470588236, "alnum_prop": 0.7154471544715447, "repo_name": "carthagecollege/django-djparking", "id": "8f065ea9de386a7f80c92644695b402d1b93d2ae", "size": "492", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "djparking/core/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "45198" }, { "name": "JavaScript", "bytes": "5710" }, { "name": "Python", "bytes": "53518" }, { "name": "Shell", "bytes": "1125" } ], "symlink_target": "" }
import cv2 from CvPyMat import CvPyMat test = CvPyMat(); imgPath = "./buri.jpg"; # Testing the conversion of Mat object to Python # img2 = test.loadImageInCpp_Demo(imgPath); # cv2.imshow("pYimg", img2); # cv2.waitKey(0); # Testing multiorientation person detection written in C++ img = cv2.imread(imgPath); rotationDegreeStep = 5; visualizeResults = True; confidenceThr = 0.5; pathToDetector = "./inriaperson.xml"; pathToOutput = "./detectorResults.csv"; test.multiRotPersDet(img, rotationDegreeStep, pathToDetector, confidenceThr, pathToOutput, visualizeResults);
{ "content_hash": "b00362022e06e21455c70caa540b3e5b", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 109, "avg_line_length": 25.043478260869566, "alnum_prop": 0.7517361111111112, "repo_name": "matteobustreo/cvPy", "id": "27de6899a08b46039d44006a6be939e4955d40c0", "size": "576", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cvPyDemo/pyTest.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "11065" }, { "name": "Makefile", "bytes": "479" }, { "name": "Python", "bytes": "576" } ], "symlink_target": "" }
import os
import argparse
import logging

from bootstrap import bootstrap
from gitwrapperlib import Git
from library import bump
from configuration import BRANCHES_SUPPORTED_FOR_TAG

# This is the main prefix used for logging
LOGGER_BASENAME = '''_CI.tag'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
LOGGER.addHandler(logging.NullHandler())


def check_branch():
    """Exit with status 1 unless the current git branch supports tagging."""
    git = Git()
    if git.get_current_branch() not in BRANCHES_SUPPORTED_FOR_TAG:
        accepted_branches = ', '.join(BRANCHES_SUPPORTED_FOR_TAG)
        print(f'Tagging is only supported on {accepted_branches} '
              'you should not tag any other branch, exiting!')
        raise SystemExit(1)


def filter_patch(patch):
    """Keep only the diff hunks that touch the cookiecutter template tree."""
    # Mark each per-file diff boundary so the patch can be split per file.
    patch = patch.replace('\ndiff', '\n|||diff')
    return ''.join(chunk for chunk in patch.split('|||')
                   if '{{cookiecutter.project_slug}}' in chunk)


def create_patch(old_version, new_version):
    """Write the filtered patch between two tags and return its file path."""
    git = Git()
    patch = git.create_patch(old_version, new_version)
    patch = filter_patch(patch)
    patch_file = os.path.join('{{cookiecutter.project_slug}}',
                              '_CI',
                              'patches',
                              f'{new_version}.patch')
    with open(patch_file, 'w') as ofile:
        ofile.write(patch)
    return patch_file


def get_arguments():
    """Parse the command line: at most one of --major/--minor/--patch."""
    parser = argparse.ArgumentParser(description='Handles bumping of the artifact version')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--major', help='Bump the major version', action='store_true')
    group.add_argument('--minor', help='Bump the minor version', action='store_true')
    group.add_argument('--patch', help='Bump the patch version', action='store_true')
    args = parser.parse_args()
    return args


def commit_patch_and_push(segment, current_version, version_file_path):
    """Bump *segment*, commit, tag, record a patch file and push everything.

    Returns the new version string.
    """
    git = Git()
    new_version = bump(segment, version_file_path)
    print(f'Commiting version {new_version}')
    git.commit(f'Set version to {new_version}', version_file_path)
    print(f'Tagging version {new_version}')
    git.add_tag(new_version)
    print(f'Creating patch between version {current_version} and {new_version}')
    patch_file = create_patch(current_version, new_version)
    print(f'Adding file {patch_file} to git tracking')
    git.add(patch_file)
    print(f'Commiting {patch_file}')
    git.commit(f'Adding patch file for version "{new_version}"', patch_file)
    print('Pushing everything')
    git.push()
    print(f'Pushing tag {new_version}')
    git.push('origin', new_version)
    return new_version


def tag(segment):
    """Entry point: bump/tag the given segment, or just print the version.

    When *segment* is falsy no commit or push happens; the version is only
    recomputed and printed. Always exits the process (status 0).
    """
    bootstrap()
    check_branch()
    version_file_path = os.path.join('{{cookiecutter.project_slug}}', '_CI', '.VERSION')
    # Read and close the version file; the original leaked the file handle.
    with open(version_file_path) as version_file:
        current_version = version_file.read().strip()
    if segment:
        version = commit_patch_and_push(segment, current_version, version_file_path)
    else:
        version = bump(segment, version_file_path)
    print(version)
    raise SystemExit(0)


if __name__ == '__main__':
    args = get_arguments()
    segment = next((argument for argument in ('major', 'minor', 'patch')
                    if getattr(args, argument)), None)
    tag(segment)
{ "content_hash": "9fcdd5bbaf7a3f65bd774fbd9f311816", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 91, "avg_line_length": 35.46666666666667, "alnum_prop": 0.6522556390977443, "repo_name": "costastf/python_library_cookiecutter", "id": "95016ecdd8640137c1ab5305fdc7f397a8a0603b", "size": "4362", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "_CI/scripts/tag.py", "mode": "33261", "license": "mit", "language": [ { "name": "Makefile", "bytes": "6862" }, { "name": "PowerShell", "bytes": "478" }, { "name": "Python", "bytes": "165889" }, { "name": "Shell", "bytes": "866" } ], "symlink_target": "" }
""" FASTMODE -- Provide real time response for each program. First, this checks to see if the proposal asks for fasttime response. If so, the task then copies the data to the fast directory for that proposal on saltpipe. If it is the first object data for that proposal, then it will also send an email to the contact PI Author Version Date ----------------------------------------------- S M Crawford (SAAO) 0.1 18 Jan 2012 """ import os import saltsafemysql as saltmysql import saltsafestring as saltstring from saltsafeio import email as sendemail def runfast(filename, propcode, obsdate, server, readmefile, sdbhost, sdbname, sdbuser, password): """Handle fast data delivery for the proposal. For a given filename """ if propcode is None or propcode=='None': return #first check in the sdb if fast data delivery is needed sdb=saltmysql.connectdb(sdbhost,sdbname, sdbuser, password) select_term='Distinct Surname, email, username, ProposalCode_Id' from_term=''' Block join Pointing using (Block_Id) join PipelineConfig using (Pointing_Id) join ProposalCode using (ProposalCode_Id) join PipelineDataAccessMethod using (PipelineDataAccessMethod_Id) join ProposalContact using (ProposalCode_Id) join Investigator on (Investigator_Id=Contact_Id) join PiptUser using (PiptUser_Id) ''' where_term="Proposal_Code like '%s' and DataAccessMethod='Fast'" \ % (propcode) #print 'Select %s from %s where %s' % (select_term, from_term, where_term) try: record=saltmysql.select(sdb, select_term, from_term, where_term) except Exception, e: print e return None #print "Checking for fast data" #print record if record: surname, email, username, propid= record[0] #print surname, email, username, propid else: return #second if so, then copy the data to the contact PI directory #on saltpipe under the fast directory. 
#rawfilename=getrawfilename(filename) y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (filename, username, obsdate)) if y==256: y=os.system('ssh sa@saltpipe mkdir /salt/ftparea/%s/fast%s' % (username, obsdate)) y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (filename, username, obsdate)) if y!=0: print "Problem with copying file %s to /salt/ftparea/%s/fast%s/" % (filename, username, obsdate) #copy the reduced data y=os.system('scp mbxp%s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (os.path.basename(filename), username, obsdate)) #check the type of data it is and copy over an ancillery data as well #if it is the first object file, check to see if an email has been #sent, and if not, send email #try to copy the spectroscopic data print filename, filename.startswith('P') if os.path.basename(filename).startswith('P'): sfilename='smbxp%s.txt' % (os.path.basename(filename).split('.fits')[0]) print sfilename try: y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (sfilename, username, obsdate)) except Exception, e: print e if os.path.basename(filename).startswith('S'): try: sfilename='mbxp%s.cat' % (os.path.basename(filename).split('.fits')[0]) print sfilename y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (sfilename, username, obsdate)) except Exception, e: print e #check to see if an email has been sent select_term='PipelineStatus' from_term=''' PipelineProposalStatistics join PipelineStatus using (PipelineStatus_Id) join NightInfo using (NightInfo_Id) join ProposalCode using (ProposalCode_Id) ''' where_term="Proposal_Code like '%s' and Date='%s-%s-%s'" % (propcode, obsdate[0:4], obsdate[4:6], obsdate[6:8]) print select_term, from_term, where_term try: record=saltmysql.select(sdb, select_term, from_term, where_term)[0][0] except: record=None print record if record=='FastEmail': return else: #insert information into the database nightinfoid=saltmysql.getnightinfoid(sdb, obsdate) insert_term="NightInfo_Id=%i, ProposalCode_Id=%i, 
PipelineStatus_Id=8" % (nightinfoid, propid) table_term="PipelineProposalStatistics" saltmysql.insert(sdb, insert_term, "PipelineProposalStatistics") #send email sender='sa@salt.ac.za' recipient=email bcc='crawfordsm@gmail.com' subject='SALT data available for %s' % propcode message=open(readmefile).read() message=message.replace('OBSDATE', obsdate) sendemail(server,'sa',password,sender,recipient,bcc, subject,message) sdb.close() return def getrawfilepath(filename): """Given a raw file name, returns the path on the SALT server of the raw file""" if filename.count('S'): ddir='salt/scam/' i=filename.index('S') filedate=saltstring.filedate(filename[i:]) elif filename.count('P'): ddir='salt/rss/' i=filename.index('P') filedate=saltstring.filedate(filename[i:]) print ddir, filedate return '%s%s/%s/raw/%s' % (ddir, filedate[0:4], filedate[4:8], filename)
{ "content_hash": "5484c6235b88b93b4819626f8efda0fb", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 115, "avg_line_length": 38.03623188405797, "alnum_prop": 0.6662221375500095, "repo_name": "saltastro/pysalt", "id": "a6a44ff72593deed52b92fa4fae3816e152cccab", "size": "7613", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plugins/fastmode.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "9334" }, { "name": "Common Lisp", "bytes": "19932" }, { "name": "Makefile", "bytes": "856" }, { "name": "Python", "bytes": "1359528" }, { "name": "Smalltalk", "bytes": "271" } ], "symlink_target": "" }
from baby_steps import given, then, when from district42 import schema from .._fixtures import * # noqa: F401, F403 def test_const_generation(*, generate, random_): with given: sch = schema.const with when: res = generate(sch) with then: assert res is None def test_const_value_generation(*, generate, random_): with given: val = "banana" sch = schema.const(val) with when: res = generate(sch) with then: assert res == val
{ "content_hash": "6a1f341fae96adf0f2837893b94a9a1c", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 54, "avg_line_length": 19, "alnum_prop": 0.6042884990253411, "repo_name": "nikitanovosibirsk/blahblah", "id": "8f7d3ea7b040fa56361489c0a5ca6e0a062d047b", "size": "513", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/const/test_const_generation.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "1389" }, { "name": "Python", "bytes": "48064" } ], "symlink_target": "" }
import os from heartbeat import BaseTest class Test(BaseTest): def test_base(self): """ Basic test with exiting Heartbeat normally """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*" ) heartbeat_proc = self.start_beat() self.wait_until(lambda: self.log_contains("heartbeat is running")) heartbeat_proc.check_kill_and_wait()
{ "content_hash": "22fe4d3e78748d3b4496b74dba4d1b56", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 74, "avg_line_length": 25.823529411764707, "alnum_prop": 0.6127562642369021, "repo_name": "christiangalsterer/execbeat", "id": "758fa108641c2d4315ecba7bcf0d44a35410335b", "size": "439", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "vendor/github.com/elastic/beats/heartbeat/tests/system/test_base.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Go", "bytes": "7422" }, { "name": "Makefile", "bytes": "1184" }, { "name": "Shell", "bytes": "28" } ], "symlink_target": "" }
import bge


class Animation(object):
    """An ordered list of mesh-frame names for a 2D sprite animation.

    Accepts either a single list of frame names or the frame names as
    individual positional arguments:

        Animation(["walk0", "walk1"])
        Animation("walk0", "walk1")
    """

    def __init__(self, *frames):
        # isinstance() replaces the original fragile
        # `"list" in str(frames[0].__class__)` check; the list is copied so
        # the caller's list is not aliased, and zero arguments no longer
        # raises IndexError.
        if frames and isinstance(frames[0], list):
            self.frames = list(frames[0])
        else:
            self.frames = list(frames)

    def __getitem__(self, index):
        return self.frames[index]

    def __str__(self):
        # e.g. 'Animation:(walk0, walk1)'; join also handles the empty case,
        # which previously raised IndexError.
        return "Animation:(" + ", ".join(self.frames) + ")"

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return len(self.frames)


class Sprite2D():
    """Base class for animable 2D sprites driven by mesh swapping.

    mainObject= The object name that owns all actions
    alwaysSensor='Always' sensor attached

    Setup: the owner must have an int property named "currentFrame"
    (initial value 0) and this Python controller must be connected to an
    Always sensor:

        ##########        ##########
        # ALWAYS #  --->  # PYTHON #
        ##########        ##########
    """

    def __init__(self, mainObject, alwaysSensor):
        self.main = bge.logic.getCurrentScene().objects[mainObject]
        self.animations = dict()
        self.always = bge.logic.getCurrentController().sensors[alwaysSensor]
        # Positive pulse mode re-triggers the controller, which is what
        # advances the animation on every pulse.
        self.always.usePosPulseMode = True
        self.currentAnimation = None

    def addAnimation(self, name, animation):
        """Register an Animation under *name* for later playback."""
        self.animations[name] = animation

    def playAnimation(self, animation, frameRate, mode):
        """Advance the named animation by one frame and swap the mesh.

        frameRate is the desired frames per second; mode is "loopStop"
        (wrap around to frame 0) or "playOnce" (stop on the last frame and
        clear currentAnimation).
        """
        tic = bge.logic.getAverageFrameRate()
        ani = self.animations[animation]
        # Fire the Always sensor every (logic ticks / desired fps) pulses.
        self.always.frequency = int(tic / frameRate)
        self.currentAnimation = animation
        if mode == "loopStop":
            if self.main["currentFrame"] == (len(ani) - 1):
                self.main["currentFrame"] = 0
            else:
                self.main["currentFrame"] += 1
            self.main.replaceMesh(ani[self.main["currentFrame"]])
        elif mode == "playOnce":
            if self.main["currentFrame"] == (len(ani) - 1):
                self.currentAnimation = None
            else:
                self.main["currentFrame"] += 1
            self.main.replaceMesh(ani[self.main["currentFrame"]])


class Character2D():
    """Base class for a 2D character.

    sprite: Sprite2D instance providing the visuals.
    parent: object name to use for physical interactions.
    """

    def __init__(self, sprite, parent):
        self.sprite = sprite
        self.parent = bge.logic.getCurrentScene().objects[parent]
{ "content_hash": "286f60975605c02ba13e2673b76fee08", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 74, "avg_line_length": 35.929577464788736, "alnum_prop": 0.5825166601332811, "repo_name": "hikaruAi/hikaBGE", "id": "bd3d8b8e3d422cb8e4e87b5bef7a5a51ae467b1f", "size": "2551", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sprites2D.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "19025" } ], "symlink_target": "" }
import uuid
from xml.dom import minidom

from oslo_serialization import jsonutils
from oslo_utils import timeutils
import webob

from jacket.api.storage.storage import common
from jacket.api.storage.storage.contrib import volume_image_metadata
from jacket.api.storage.storage.openstack import wsgi
from jacket import context
from jacket import db
from jacket.storage import exception
from jacket.objects import storage
from jacket.storage import test
from jacket.tests.storage.unit.api import fakes
from jacket.tests.storage.unit import fake_volume
from jacket.storage import volume


def fake_db_volume_get(*args, **kwargs):
    # Canned DB-layer volume record used by the stubbed volume API below.
    return {
        'id': 'fake',
        'host': 'host001',
        'status': 'available',
        'size': 5,
        'availability_zone': 'somewhere',
        'created_at': timeutils.utcnow(),
        'display_name': 'anothervolume',
        'display_description': 'Just another volume!',
        'volume_type_id': None,
        'snapshot_id': None,
        'project_id': 'fake',
        'migration_status': None,
        '_name_id': 'fake2',
        'attach_status': 'detached',
    }


def fake_volume_api_get(*args, **kwargs):
    # Stub for volume.api.API.get: wraps the fake DB record in a volume
    # object built with an admin request context.
    ctx = context.RequestContext('admin', 'fake', True)
    db_volume = fake_db_volume_get()
    return fake_volume.fake_volume_obj(ctx, **db_volume)


def fake_volume_get_all(*args, **kwargs):
    # Stub for volume.api.API.get_all returning exactly one fake volume.
    return storage.VolumeList(storage=[fake_volume_api_get()])


def fake_volume_get_all_empty(*args, **kwargs):
    # Stub for volume.api.API.get_all returning no volumes at all.
    return storage.VolumeList(storage=[])


# Image metadata the stubs below hand back; tests compare API responses
# against this dict.
fake_image_metadata = {
    'image_id': 'someid',
    'image_name': 'fake',
    'kernel_id': 'somekernel',
    'ramdisk_id': 'someramdisk',
}


def fake_get_volume_image_metadata(*args, **kwargs):
    # Stub: image metadata for a single volume.
    return fake_image_metadata


def fake_get_volumes_image_metadata(*args, **kwargs):
    # Stub: image metadata keyed by volume id, for list endpoints.
    return {'fake': fake_image_metadata}


def return_empty_image_metadata(*args, **kwargs):
    # Stub: volume with no image metadata at all.
    return {}


def volume_metadata_delete(context, volume_id, key, meta_type):
    # No-op stand-in for the DB-layer metadata delete.
    pass


def fake_create_volume_metadata(context, volume_id, metadata,
                                delete, meta_type):
    # Stand-in for the DB-layer metadata update; ignores its input and
    # returns the canned metadata.
    return fake_get_volume_image_metadata()


def return_volume_nonexistent(*args, **kwargs):
    # Stub that simulates a lookup for a volume that does not exist.
    raise exception.VolumeNotFound('bogus test message')


class VolumeImageMetadataTest(test.TestCase):
    """Tests for the os-vol-image-meta extension (JSON responses)."""

    content_type = 'application/json'

    def setUp(self):
        # Replace the volume API entry points with the canned stubs above
        # so no real volumes or glance calls are needed.
        super(VolumeImageMetadataTest, self).setUp()
        self.stubs.Set(volume.api.API, 'get', fake_volume_api_get)
        self.stubs.Set(volume.api.API, 'get_all', fake_volume_get_all)
        self.stubs.Set(volume.api.API, 'get_volume_image_metadata',
                       fake_get_volume_image_metadata)
        self.stubs.Set(volume.api.API, 'get_volumes_image_metadata',
                       fake_get_volumes_image_metadata)
        self.UUID = uuid.uuid4()
        self.controller = (volume_image_metadata.
                           VolumeImageMetadataController())

    def _make_request(self, url):
        """Issue a GET against the WSGI app and return the raw response."""
        req = webob.Request.blank(url)
        req.accept = self.content_type
        res = req.get_response(fakes.wsgi_app())
        return res

    def _get_image_metadata(self, body):
        """Extract the image metadata dict from a single-volume response."""
        return jsonutils.loads(body)['volume']['volume_image_metadata']

    def _get_image_metadata_list(self, body):
        """Extract image metadata dicts from a volume-list response."""
        return [
            volume['volume_image_metadata']
            for volume in jsonutils.loads(body)['volumes']
        ]

    def _create_volume_and_glance_metadata(self):
        # Create one volume row plus its glance metadata directly through
        # the storage layer; the metadata matches fake_image_metadata.
        ctxt = context.get_admin_context()
        storage.volume_create(ctxt, {'id': 'fake', 'status': 'available',
                                     'host': 'test', 'provider_location': '',
                                     'size': 1})
        storage.volume_glance_metadata_create(ctxt, 'fake', 'image_id',
                                              'someid')
        storage.volume_glance_metadata_create(ctxt, 'fake', 'image_name',
                                              'fake')
        storage.volume_glance_metadata_create(ctxt, 'fake', 'kernel_id',
                                              'somekernel')
        storage.volume_glance_metadata_create(ctxt, 'fake', 'ramdisk_id',
                                              'someramdisk')

    def test_get_volume(self):
        """Show endpoint includes the volume's image metadata."""
        self._create_volume_and_glance_metadata()
        res = self._make_request('/v2/fake/volumes/%s' % self.UUID)
        self.assertEqual(200, res.status_int)
        self.assertEqual(fake_image_metadata,
                         self._get_image_metadata(res.body))

    def test_list_detail_volumes(self):
        """Detail list endpoint includes image metadata per volume."""
        self._create_volume_and_glance_metadata()
        res = self._make_request('/v2/fake/volumes/detail')
        self.assertEqual(200, res.status_int)
        self.assertEqual(fake_image_metadata,
                         self._get_image_metadata_list(res.body)[0])

    def test_list_detail_empty_volumes(self):
        """An empty volume list must not trigger a metadata lookup."""
        def fake_dont_call_this(*args, **kwargs):
            fake_dont_call_this.called = True
        fake_dont_call_this.called = False
        self.stubs.Set(volume.api.API, 'get_list_volumes_image_metadata',
                       fake_dont_call_this)
        self.stubs.Set(volume.api.API, 'get_all', fake_volume_get_all_empty)

        res = self._make_request('/v2/fake/volumes/detail')
        self.assertEqual(200, res.status_int)
        self.assertFalse(fake_dont_call_this.called)

    def test_list_detail_volumes_with_limit(self):
        """Pagination (limit=1) still returns full image metadata."""
        ctxt = context.get_admin_context()
        storage.volume_create(ctxt, {'id': 'fake', 'status': 'available',
                                     'host': 'test', 'provider_location': '',
                                     'size': 1})
        storage.volume_glance_metadata_create(ctxt, 'fake', 'key1', 'value1')
        storage.volume_glance_metadata_create(ctxt, 'fake', 'key2', 'value2')
        res = self._make_request('/v2/fake/volumes/detail?limit=1')
        self.assertEqual(200, res.status_int)
        self.assertEqual({'key1': 'value1', 'key2': 'value2'},
                         self._get_image_metadata_list(res.body)[0])

    def test_create_image_metadata(self):
        """os-set_image_metadata action stores and echoes the metadata."""
        self.stubs.Set(volume.api.API, 'get_volume_image_metadata',
                       return_empty_image_metadata)
        self.stubs.Set(storage, 'volume_metadata_update',
                       fake_create_volume_metadata)

        body = {"os-set_image_metadata": {"metadata": fake_image_metadata}}
        req = webob.Request.blank('/v2/fake/volumes/1/action')
        req.method = "POST"
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(200, res.status_int)
        self.assertEqual(fake_image_metadata,
                         jsonutils.loads(res.body)["metadata"])

    def test_create_with_keys_case_insensitive(self):
        # If the keys in uppercase_and_lowercase, should return the one
        # which server added
        self.stubs.Set(volume.api.API, 'get_volume_image_metadata',
                       return_empty_image_metadata)
        self.stubs.Set(storage, 'volume_metadata_update',
                       fake_create_volume_metadata)

        body = {
            "os-set_image_metadata": {
                "metadata": {
                    "Image_Id": "someid",
                    "image_name": "fake",
                    "Kernel_id": "somekernel",
                    "ramdisk_id": "someramdisk"
                },
            },
        }

        req = webob.Request.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(200, res.status_int)
        self.assertEqual(fake_image_metadata,
                         jsonutils.loads(res.body)["metadata"])

    def test_create_empty_body(self):
        """A missing request body is rejected with 400."""
        req = fakes.HTTPRequest.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, 1, None)

    def test_create_nonexistent_volume(self):
        """Setting metadata on a missing volume yields 404."""
        self.stubs.Set(volume.api.API, 'get', return_volume_nonexistent)

        req = fakes.HTTPRequest.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.content_type = "application/json"
        body = {"os-set_image_metadata": {
            "metadata": {"image_name": "fake"}}
        }
        req.body = jsonutils.dump_as_bytes(body)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.create, req, 1, body)

    def test_invalid_metadata_items_on_create(self):
        """Oversized keys/values give 413; empty keys give 400."""
        self.stubs.Set(storage, 'volume_metadata_update',
                       fake_create_volume_metadata)
        req = fakes.HTTPRequest.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.headers["content-type"] = "application/json"

        data = {"os-set_image_metadata": {
            "metadata": {"a" * 260: "value1"}}
        }

        # Test for long key
        req.body = jsonutils.dump_as_bytes(data)
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.create, req, 1, data)

        # Test for long value
        data = {"os-set_image_metadata": {
            "metadata": {"key": "v" * 260}}
        }
        req.body = jsonutils.dump_as_bytes(data)
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.create, req, 1, data)

        # Test for empty key.
        data = {"os-set_image_metadata": {
            "metadata": {"": "value1"}}
        }
        req.body = jsonutils.dump_as_bytes(data)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, 1, data)

    def test_delete(self):
        """os-unset_image_metadata removes a key and returns 200."""
        self.stubs.Set(storage, 'volume_metadata_delete',
                       volume_metadata_delete)

        body = {"os-unset_image_metadata": {
            "key": "ramdisk_id"}
        }
        req = webob.Request.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(200, res.status_int)

    def test_delete_meta_not_found(self):
        """Deleting an unknown metadata key yields 404."""
        data = {"os-unset_image_metadata": {
            "key": "invalid_id"}
        }
        req = fakes.HTTPRequest.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(data)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, 1, data)

    def test_delete_nonexistent_volume(self):
        """Deleting metadata from a missing volume yields 404."""
        self.stubs.Set(storage, 'volume_metadata_delete',
                       return_volume_nonexistent)

        body = {"os-unset_image_metadata": {
            "key": "fake"}
        }
        req = fakes.HTTPRequest.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, 1, body)

    def test_show_image_metadata(self):
        """os-show_image_metadata returns the stored metadata dict."""
        body = {"os-show_image_metadata": None}
        req = webob.Request.blank('/v2/fake/volumes/1/action')
        req.method = 'POST'
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(200, res.status_int)
        self.assertEqual(fake_image_metadata,
                         jsonutils.loads(res.body)["metadata"])


class ImageMetadataXMLDeserializer(common.MetadataXMLDeserializer):
    # XML node name under which the image metadata is serialized.
    metadata_node_name = "volume_image_metadata"


class VolumeImageMetadataXMLTest(VolumeImageMetadataTest):
    """Re-run the JSON test suite against the XML serialization."""

    content_type = 'application/xml'

    def _get_image_metadata(self, body):
        """Extract image metadata from a single-volume XML response."""
        deserializer = wsgi.XMLDeserializer()
        volume = deserializer.find_first_child_named(
            minidom.parseString(body), 'volume')
        image_metadata = deserializer.find_first_child_named(
            volume, 'volume_image_metadata')
        return wsgi.MetadataXMLDeserializer().extract_metadata(image_metadata)

    def _get_image_metadata_list(self, body):
        """Extract image metadata dicts from a volume-list XML response."""
        deserializer = wsgi.XMLDeserializer()
        volumes = deserializer.find_first_child_named(
            minidom.parseString(body), 'volumes')
        volume_list = deserializer.find_children_named(volumes, 'volume')
        image_metadata_list = [
            deserializer.find_first_child_named(
                volume, 'volume_image_metadata'
            )
            for volume in volume_list]
        metadata_deserializer = wsgi.MetadataXMLDeserializer()
        return [metadata_deserializer.extract_metadata(image_metadata)
                for image_metadata in image_metadata_list]
{ "content_hash": "e23f3568590f30d14bf938a6ecb68d8e", "timestamp": "", "source": "github", "line_count": 349, "max_line_length": 81, "avg_line_length": 37.55873925501432, "alnum_prop": 0.599023497101007, "repo_name": "HybridF5/jacket", "id": "8137fe54ed4d0e9f95c009456cf4be4ee10f7c7a", "size": "13713", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jacket/tests/storage/unit/api/contrib/test_volume_image_metadata.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "26995056" }, { "name": "Shell", "bytes": "28464" }, { "name": "Smarty", "bytes": "291947" } ], "symlink_target": "" }
from haas import rest
from abc import ABCMeta, abstractmethod
from StringIO import StringIO
import unittest
import json
import sys

from werkzeug.routing import Map
from werkzeug.wrappers import Request

from schema import Schema, Optional

# We don't directly use this, but unless we import it, the coverage tool
# complains and doesn't give us a report.
import pytest


def wsgi_mkenv(method, path, data=None):
    """Helper routine to build a wsgi environment.

    We need this to generate mock requests.
    """
    env = {
        'REQUEST_METHOD': method,
        'SCRIPT_NAME': '',
        'PATH_INFO': path,
        'SERVER_NAME': 'haas.test-env',
        'SERVER_PORT': '5000',
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': 'http',
        'wsgi.errors': sys.stderr,
        'wsgi.multithreaded': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
    }
    if data is None:
        env['wsgi.input'] = StringIO()
    else:
        env['wsgi.input'] = StringIO(data)
    return env


class HttpTest(unittest.TestCase):
    """A test which excercises the http server.

    HttpTests run with no api functions registered to the http server yet;
    this lets us test the http-related code in an environment that is not
    constrained by our actual api.
    """

    def setUp(self):
        # We back up the old _url_map, and restore it in tearDown; this makes
        # it easy to be sure that we're not interfering with other tests:
        self.old_url_map = rest._url_map
        # We make ourselves an empty one for our test:
        rest._url_map = Map()

    def tearDown(self):
        rest._url_map = self.old_url_map


class HttpEquivalenceTest(object):
    """A test that ensures a particlar call to the api behaves the same over
    http and when called as a function.

    Subclasses must override `api_call` and `request`, and may also be
    interested in `api_setup` and `api_teardown`.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def api_call(self):
        """Invoke the api call directly."""

    @abstractmethod
    def request(self):
        """Return a request which will invoke the api call.

        The request should take the form of a WSGI v1.0 environment.
        The function `wsgi_mkenv` can be used to build a suitable
        environment.
        """

    def api_setup(self):
        """Setup routine to be run before each call to the api.

        This is conceptually similar to python's unittest setUp() method,
        but with each call to `api_call`, rather than the whole test.

        By default this is a noop; subclasses should override this if they
        need specific environments.
        """

    def api_teardown(self):
        """like `api_setup`, but tears things down after the call."""

    def test_equivalence(self):
        """Calling `api_call` directly should be the same as via http."""

        # First invoke the call over http. This should never raise exceptions.
        self.api_setup()
        req = Request(self.request())
        resp = rest.request_handler(req)
        body = resp.get_data()
        self.api_teardown()

        # Now call it directly.
        try:
            self.api_setup()
            ret = self.api_call()
            if ret is None:
                ret_body, ret_status = '', 200
            elif type(ret) is tuple:
                ret_body, ret_status = ret
            else:
                ret_body, ret_status = ret, 200
            assert resp.status_code == ret_status
            if ret_body == '':
                assert body == ''
            else:
                assert json.loads(body) == json.loads(ret_body)
        # FIX: was `except rest.APIError, e:` -- Python-2-only syntax that is
        # a hard parse error on Python 3; `as` works on Python 2.6+ as well.
        except rest.APIError as e:
            assert resp.status_code == e.status_code
            assert json.loads(body) == {'type': e.__class__.__name__,
                                        'msg': e.message,
                                        }
        finally:
            self.api_teardown()


class TestUrlArgs(HttpEquivalenceTest, HttpTest):
    """Test that arguments supplied in the url are passed correctly."""
    # The use of HTTPEquivalenceTest here is a bit weird; We're not actually
    # calling the api function from `api_call`. This is actually probably a
    # fairly common way to want to use the superclass; we should think about
    # whether the documented usage is necessarily the right idea.

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('GET', '/func/<foo>/<bar>')
        def func(foo, bar):
            return json.dumps([foo, bar])

    def api_call(self):
        return json.dumps(['alice', 'bob'])

    def request(self):
        return wsgi_mkenv('GET', '/func/alice/bob')


class ReturnTest(object):
    """Superclass for the three tests TestReturn* below.

    Each of these is an HttpEquivalenceTest which exercises the different
    kinds of permitted return values.
    """

    def api_call(self):
        return self.foo()

    def request(self):
        return wsgi_mkenv('GET', '/foo')


class TestReturn0(ReturnTest, HttpEquivalenceTest, HttpTest):
    """Exercises an api call that returns nothing (None)."""

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('GET', '/foo')
        def foo():
            pass

        self.foo = foo


class TestReturn1(ReturnTest, HttpEquivalenceTest, HttpTest):
    """Exercises an api call that returns just a body."""

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('GET', '/foo')
        def foo():
            return '"foo"'

        self.foo = foo


class TestReturn2(ReturnTest, HttpEquivalenceTest, HttpTest):
    """Exercises an api call that returns a (body, status) tuple."""

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('GET', '/foo')
        def foo():
            return '"foo"', 202

        self.foo = foo


class TestBodyArgs(HttpEquivalenceTest, HttpTest):
    """Test that arguments supplied in the body are passed correctly."""

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('POST', '/func/foo')
        def foo(bar, baz):
            return json.dumps([bar, baz])

    def api_call(self):
        return json.dumps(['bonnie', 'clyde'])

    def request(self):
        return wsgi_mkenv('POST', '/func/foo',
                          data=json.dumps({'bar': 'bonnie',
                                           'baz': 'clyde'}))


class TestRestCallSchema(HttpEquivalenceTest, HttpTest):
    """Test that an alternate schema is used if one is provided to rest_call."""

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('POST', '/product', schema=Schema({
            'x': int,
            'y': int,
            Optional('z'): int,
        }))
        def product(x, y, z=1):
            return json.dumps(x * y * z)

    def api_call(self):
        return json.dumps(14)

    def request(self):
        return wsgi_mkenv('POST', '/product',
                          data=json.dumps({'x': 2, 'y': 7}))


class TestEquiv_basic_APIError(HttpEquivalenceTest, HttpTest):
    """Basic test to make sure the APIError handling code is excercised."""

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('GET', '/some_error')
        def some_error():
            self.api_call()

    def api_call(self):
        raise rest.APIError("Basic test of the APIError code.")

    def request(self):
        return wsgi_mkenv('GET', '/some_error')


def _is_error(resp, errtype):
    """Return True iff the Response `resp` represents an `errtype`.

    `resp` should be a response returned by `request_handler`.
    `errtype` should be a subclass of APIError.
    """
    try:
        return json.loads(resp.get_data())['type'] == errtype.__name__
    # FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; `except Exception:` keeps the deliberate
    # best-effort behaviour without masking interpreter-level signals.
    except Exception:
        # It's possible that this response isn't even an error, in which case
        # the data may not parse as the above statement is expecting. Well,
        # it's not an error, so:
        return False


class TestValidationError(HttpTest):
    """basic tests for input validation."""

    def setUp(self):
        HttpTest.setUp(self)

        @rest.rest_call('POST', '/give-me-an-e')
        def api_call(foo, bar):
            pass

        @rest.rest_call('PUT', '/custom-schema', schema=Schema({
            "the_value": int,
        }))
        def custom_schema(the_value):
            return repr(the_value)

    def _do_request(self, data):
        """Make a request to the endpoint with `data` in the body.

        `data` should be a string -- the server will expect valid json, but
        we want to write test cases with invalid input as well.
        """
        req = Request(wsgi_mkenv('POST', '/give-me-an-e', data=data))
        return rest.request_handler(req)

    def test_ok(self):
        """A well-formed body with exactly the expected keys is accepted."""
        assert not _is_error(self._do_request(json.dumps({'foo': 'alice',
                                                          'bar': 'bob'})),
                             rest.ValidationError)

    def test_bad_json(self):
        """A body that is not valid json is rejected."""
        assert _is_error(self._do_request('xploit'), rest.ValidationError)

    def test_missing_bar(self):
        """A body missing a required argument is rejected."""
        assert _is_error(self._do_request(json.dumps({'foo': 'hello'})),
                         rest.ValidationError)

    def test_extra_baz(self):
        """A body carrying an unexpected extra argument is rejected."""
        assert _is_error(self._do_request(json.dumps({'foo': 'alice',
                                                      'bar': 'bob',
                                                      'baz': 'eve'})),
                         rest.ValidationError)

    def test_custom_schema(self):
        """A custom schema's type constraints are enforced."""
        assert _is_error(self._do_request(json.dumps({
            'the_value': 'Not an integer!',
        })), rest.ValidationError)


class TestCallOnce(HttpTest):
    """Verify that the request handler invokes the API *exactly* once.

    This is a regression test; A previous refactoring introduced a bug where
    the api function was called twice.
    """

    def setUp(self):
        HttpTest.setUp(self)
        self.num_calls = 0

    def test_call_once(self):
        # We define an API call that increments a counter each time the
        # function is called, then invoke it via HTTP. Finally, we verify that
        # the counter is equal to 1, indicating that the function was called
        # the correct number of times.
        @rest.rest_call('POST', '/increment')
        def increment():
            """Increment a counter each time this function is called."""
            self.num_calls += 1

        rest.request_handler(Request(wsgi_mkenv('POST', '/increment')))
        assert self.num_calls == 1
{ "content_hash": "f244c61a6435f609cb5fac6912925e69", "timestamp": "", "source": "github", "line_count": 344, "max_line_length": 80, "avg_line_length": 30.281976744186046, "alnum_prop": 0.5837573197657675, "repo_name": "SahilTikale/switchHaaS", "id": "1b9151f8579414b73c2cc04be9188c2a1724ac81", "size": "11033", "binary": false, "copies": "1", "ref": "refs/heads/haas-as-a-switch", "path": "tests/unit/rest.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "194485" } ], "symlink_target": "" }
from typing import Tuple, Union

from django.contrib import messages
from django.contrib.auth import login, update_session_auth_hash
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib.auth.views import LoginView
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponseRedirect
from django.http.request import QueryDict
from django.urls.base import reverse, reverse_lazy
from django.views import generic
from django.views.generic.edit import FormView
from guardian.mixins import LoginRequiredMixin
from more_itertools import bucket

from accounts import forms
from accounts.backends import TWO_FACTOR_AUTH_SESSION_KEY
from accounts.forms import TOTPCheckForm
from accounts.models import GoogleAuthenticatorTOTP, User


class LoginWithRedirectToTwoFactorAuthView(LoginView):
    """Step 1 of the login process."""

    def get_success_url(self) -> str:
        """Route the freshly logged-in user to the appropriate next step.

        Users with an activated OTP go to the 2FA check; researchers/staff
        without one are nudged into 2FA setup; everyone else proceeds as a
        normal Django login.
        """
        user: User = self.request.user
        otp: GoogleAuthenticatorTOTP = getattr(user, "otp")
        if otp and otp.activated:
            return reverse("accounts:2fa-login")
        elif user.is_researcher or user.is_staff:
            messages.warning(
                self.request,
                "If you're a researcher or Lookit staff, you'll want to set up "
                "2FA with us. Please complete the Two-Factor Auth setup below "
                "and you'll be on your way!",
            )
            return reverse("accounts:2fa-setup")
        else:
            return super().get_success_url()


class TwoFactorAuthLoginView(UserPassesTestMixin, LoginView):
    """Semi-optional two-factor authentication login.

    Researchers must have 2FA activated and verified prior to viewing /exp/
    pages. Participants are precluded from visiting this page.
    """

    form_class = TOTPCheckForm

    def user_is_researcher_or_staff(self):
        # Gate for UserPassesTestMixin: only researchers or staff may visit.
        return getattr(self.request.user, "is_researcher") or getattr(
            self.request.user, "is_staff"
        )

    test_func = user_is_researcher_or_staff

    def form_valid(self, form):
        """Override base functionality to skip auth part.

        Since OTP was already checked during Form cleaning process, we can
        just redirect here.
        """
        self.request.session[TWO_FACTOR_AUTH_SESSION_KEY] = True
        return HttpResponseRedirect(self.get_success_url())

    def get_redirect_url(self) -> str:
        """Have a good default for researchers - the study list."""
        return super().get_redirect_url() or reverse("exp:study-list")


class ResearcherRegistrationView(generic.CreateView):
    """Sign-up flow for researcher accounts."""

    template_name = "accounts/researcher-registration.html"
    model = User
    form_class = forms.ResearcherRegistrationForm

    def form_valid(self, form):
        """If the registration process went well, log the user in."""
        # UserRegistrationForm.is_valid() should do proper authentication
        resp = super().form_valid(form)

        # We expect user to be loaded by `ModelFormMixin.form_valid`
        user: User = getattr(self, "object")

        # Following with what django.auth.views.LoginView does here.
        login(
            self.request,
            user,
            backend="accounts.backends.TwoFactorAuthenticationBackend",
        )
        messages.success(self.request, "Researcher account created.")
        return resp

    def get_success_url(self) -> str:
        """Researchers go to 2FA setup after they finish regular registration.

        Leverage the fact that `ModelFormMixin.form_valid` sets
        `self.object = form.save()` prior to calling the super() method
        (`FormMixin.form_valid`), which actually does the work of constructing
        the HttpRedirectResponse for us.
        """
        return reverse("accounts:2fa-setup")


class TwoFactorAuthSetupView(LoginRequiredMixin, UserPassesTestMixin, FormView):
    """QR-code display plus first-time OTP verification for researchers."""

    template_name = "accounts/2fa-setup.html"
    form_class = forms.TOTPCheckForm
    success_url = reverse_lazy("exp:study-list")
    permission_denied_message = (
        "For security reasons, once you've activated Two Factor Authentication, you "
        "can't access the QR code again. If you are locked out of your account and "
        "need to reset 2FA to get back in, please contact lookit-tech@mit.edu."
    )

    def get_context_data(self, **kwargs):
        # Lazily create the OTP record so the QR code can be rendered on
        # first visit.
        context = super().get_context_data()
        otp = GoogleAuthenticatorTOTP.objects.get_or_create(user=self.request.user)[0]
        context["svg_qr_code"] = otp.get_svg_qr_code()
        return context

    def get_form_kwargs(self):
        """Pass the request object to our special TOTPCheckForm."""
        kwargs = super().get_form_kwargs()
        kwargs["request"] = self.request
        return kwargs

    def form_valid(self, form):
        """Executed when the OTP code has been verified.

        If the form is valid, the session should be marked as using 2FA.
        """
        otp: GoogleAuthenticatorTOTP = getattr(self.request.user, "otp")
        otp.activated = True
        otp.save()
        self.request.session[TWO_FACTOR_AUTH_SESSION_KEY] = True
        return super().form_valid(form)

    def check_researcher_status_and_otp_presence(self):
        """Guard function.

        1) Make sure they're a researcher.
        2) Don't let the user see the QR code if they've had a chance to
           set up OTP already.
        3) If they're just checking their OTP code, let the request through.
        """
        user: User = self.request.user
        method: str = self.request.method
        if not user.is_researcher:
            return False
        if method == "GET":
            # If the user has TOTP set up already, then they shouldn't be able to
            # see the QR code again.
            return not user.otp or not getattr(user.otp, "activated")
        elif method == "POST":
            # TOTP checks, however, only depend on the user having an OTP object
            # associated with their account.
            return bool(user.otp)
        else:
            return False

    test_func = check_researcher_status_and_otp_presence


class AccountManagementView(LoginRequiredMixin, generic.TemplateView):
    """Handles user info, password change, and 2FA management."""

    # Prefixes disambiguate the three forms rendered on the same page.
    ACCOUNT_FORM_PREFIX = "account"
    PASSWORD_FORM_PREFIX = "password"
    OTP_FORM_PREFIX = "otp"

    template_name = "accounts/account-update.html"

    update_account_form_class = forms.AccountUpdateForm
    change_password_form_class = forms.PasswordChangeForm
    otp_check_form_class = forms.TOTPCheckForm

    def post(self, request: WSGIRequest):
        """Process forms dependent on state, then render as with `get`.

        We only allow submission for one form at a time. Furthermore, our OTP
        check form only validates the given auth code; what we do with the
        validated auth code depends on the form handle associated with the
        particular submit button on the form.
        """
        post_data = self.request.POST
        user, otp = self._get_user_and_otp()
        action = post_data["form-handle"]
        # Exactly one of the three forms is bound (the one whose prefixed
        # fields appeared in the POST data).
        form = next(f for f in self._get_forms() if f.is_bound)

        if form.is_valid():
            # Execute the action indicated by the form handle.
            if action == "update-account":
                user = form.save()
                messages.success(request, f"{user} Successfully saved")
            elif action == "change-password":
                user = form.save()
                # Re-cycle session for user.
                update_session_auth_hash(request, user)
                # Nuke old form data - otherwise the validation will kick in.
                # TODO: We probably don't have to trick _get_forms here. Find a better way?
                self.request.POST = QueryDict()
                messages.success(request, "Password successfully changed")
            elif action == "activate-otp":
                otp.activated = True
                otp.save()
                request.session[TWO_FACTOR_AUTH_SESSION_KEY] = True
                messages.success(request, "Two factor auth activated!")
            elif action == "deactivate-otp":
                otp.delete()
                request.session[TWO_FACTOR_AUTH_SESSION_KEY] = False
                messages.success(
                    request,
                    "Two factor auth deactivated. You will need to reset with "
                    "a new QR code if you want to activate it again.",
                )
            else:
                messages.error(request, "There was an error.")

        return super().get(request)

    def get_context_data(self, **kwargs):
        # Render all three forms (bound or unbound) plus the user/otp state.
        context = super().get_context_data(**kwargs)
        user, otp = self._get_user_and_otp()
        update_account_form, change_password_form, otp_check_form = self._get_forms()
        context.update(
            {
                "update_account_form": update_account_form,
                "change_password_form": change_password_form,
                "otp_check_form": otp_check_form,
                "user": user,
                "otp": otp,
            }
        )
        return context

    def _get_user_and_otp(self) -> Tuple[User, Union[GoogleAuthenticatorTOTP, None]]:
        """Return the request's user and their OTP record (or None)."""
        user: User = self.request.user
        otp: Union[GoogleAuthenticatorTOTP, None]
        try:
            otp = GoogleAuthenticatorTOTP.objects.get(user=user)
        except GoogleAuthenticatorTOTP.DoesNotExist:
            otp = None
        return user, otp

    def _get_forms(
        self,
    ) -> (forms.AccountUpdateForm, forms.PasswordChangeForm, forms.TOTPCheckForm):
        """Bind forms appropriately for method."""
        request = self.request
        # TODO: switch to normal attribute access after this is fixed
        #     https://youtrack.jetbrains.com/issue/PY-37457
        post_data: QueryDict = getattr(request, "POST")

        # Bucket into new QueryDicts based on prefix. Must use MultiValueDict.update
        # to enforce list containers for values.
        buckets = bucket(post_data.items(), lambda pair: pair[0].partition("-")[0])

        account_update = QueryDict(mutable=True)
        account_update.update(dict(buckets[self.ACCOUNT_FORM_PREFIX]))
        password_change = QueryDict(mutable=True)
        password_change.update(dict(buckets[self.PASSWORD_FORM_PREFIX]))
        otp_check = QueryDict(mutable=True)
        otp_check.update(dict(buckets[self.OTP_FORM_PREFIX]))

        # When data is set to None, the form will not bind.
        return (
            self.update_account_form_class(
                instance=request.user,
                data=account_update or None,
                prefix=self.ACCOUNT_FORM_PREFIX,
            ),
            self.change_password_form_class(
                request.user,
                data=password_change or None,
                prefix=self.PASSWORD_FORM_PREFIX,
            ),
            self.otp_check_form_class(
                request=request, data=otp_check or None, prefix=self.OTP_FORM_PREFIX
            ),
        )
{ "content_hash": "e815fbc5d460e1d684c123015aaece19", "timestamp": "", "source": "github", "line_count": 285, "max_line_length": 91, "avg_line_length": 38.887719298245614, "alnum_prop": 0.6309663448524767, "repo_name": "CenterForOpenScience/lookit-api", "id": "0e7675ce536171a47cd13539fb303210a28e5bfe", "size": "11083", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "accounts/views.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "11022" }, { "name": "HTML", "bytes": "185393" }, { "name": "Python", "bytes": "481700" }, { "name": "Shell", "bytes": "1166" } ], "symlink_target": "" }
import tensorflow as tf

from setags.data.input import EMBEDDING_SIZE
from setags.data.utils import BIOTag
from setags.utils import DictWrapper


class Features(DictWrapper):
    """Input features expected by the estimator (see ``model_fn``)."""

    def __init__(self):
        self.id = None
        self.title = None
        self.title_length = None
        self.content = None
        self.content_length = None


class Labels(DictWrapper):
    """Per-token BIO tag labels for title and content sequences."""

    def __init__(self):
        self.title_bio = None
        self.content_bio = None


class Params(DictWrapper):
    """Hyperparameters with their default values."""

    def __init__(self):
        self.num_epochs = 70
        self.batch_size = 64
        self.max_word_idx = None
        self.num_title_units = 300
        self.num_content_units = 200
        self.learning_rate = 0.002


# Number of distinct BIO tags (size of the per-token output layer).
BIO_ENCODING_SIZE = sum(1 for _ in BIOTag)
DEFAULT_PARAMS = Params().as_dict()


def model_fn(mode, features, labels, params):
    """tf.estimator model_fn: wrap raw dicts into typed objects and build the graph.

    ``labels`` is only converted outside PREDICT mode, where it may be absent.
    """
    _params = Params.from_dict(params)
    _features = Features.from_dict(features)
    _labels = None
    if mode != tf.estimator.ModeKeys.PREDICT:
        _labels = Labels.from_dict(labels)
    return build_model(mode, _features, _labels, _params)


def build_model(mode: tf.estimator.ModeKeys, features: Features, labels: Labels,
                params: Params) -> tf.estimator.EstimatorSpec:
    """Construct the tagging network and return an EstimatorSpec for ``mode``.

    The title is encoded with a bidirectional RNN; its final state gates the
    content embeddings through a softsign GLU. Both streams get per-token BIO
    logits. Loss/metrics are only built outside PREDICT mode.
    """
    with tf.device("/cpu:0"):
        # Embedding matrix is fed at runtime via this placeholder.
        embeddings = tf.placeholder(tf.float32, [None, EMBEDDING_SIZE], name='embeddings')
        # relu clamps negative ids (padding markers, presumably) to index 0 —
        # NOTE(review): confirm the id<0 convention against the input pipeline.
        embedded_title = tf.nn.embedding_lookup(embeddings, tf.nn.relu(features.title))
        embedded_content = tf.nn.embedding_lookup(embeddings, tf.nn.relu(features.content))

    with tf.variable_scope("encoder"):
        with tf.variable_scope("title"):
            title_encoder = RNNLayer(embedded_title, features.title_length, params.num_title_units)
        with tf.variable_scope("content"):
            # Project the title summary to embedding size and add it to every
            # content position, then use it to gate the content embeddings.
            title_final_state = tf.layers.dense(title_encoder.final_state, EMBEDDING_SIZE, use_bias=False)
            title_affected_content = tf.expand_dims(title_final_state, -2) + embedded_content
            content_encoder_outputs = softsign_glu(embedded_content, title_affected_content)

    with tf.variable_scope("output"):
        # Per-token logits over the BIO tag set for both streams.
        title_bio_logits = tf.layers.dense(title_encoder.outputs, BIO_ENCODING_SIZE)
        content_bio_logits = tf.layers.dense(content_encoder_outputs, BIO_ENCODING_SIZE)

    title_bio_predictions = tf.argmax(title_bio_logits, -1)
    content_bio_predictions = tf.argmax(content_bio_logits, -1)

    # Assign a default value to the train_op and loss to be passed for modes other than TRAIN
    loss = None
    train_op = None
    eval_metric_ops = None

    # Following part of the network will be constructed only for training
    if mode != tf.estimator.ModeKeys.PREDICT:
        hot_title_bio = tf.one_hot(labels.title_bio, BIO_ENCODING_SIZE)
        hot_content_bio = tf.one_hot(labels.content_bio, BIO_ENCODING_SIZE)

        title_masks = Masks(labels.title_bio, title_bio_predictions, features.title_length)
        content_masks = Masks(labels.content_bio, content_bio_predictions, features.content_length)

        # "Precision" losses weight tokens the model tagged as non-O;
        # "recall" losses weight tokens annotated as non-O in the labels.
        title_bio_precision_loss = tf.losses.softmax_cross_entropy(
            hot_title_bio, title_bio_logits, title_masks.predicted_tokens)
        content_bio_precision_loss = tf.losses.softmax_cross_entropy(
            hot_content_bio, content_bio_logits, content_masks.predicted_tokens)
        title_bio_recall_loss = tf.losses.softmax_cross_entropy(
            hot_title_bio, title_bio_logits, title_masks.annotated_tokens)
        content_bio_recall_loss = tf.losses.softmax_cross_entropy(
            hot_content_bio, content_bio_logits, content_masks.annotated_tokens)
        # tf.losses.* registered all four partial losses; sum them here.
        loss = tf.losses.get_total_loss()

        tf.summary.scalar('title_precision_loss', title_bio_precision_loss)
        tf.summary.scalar('content_precision_loss', content_bio_precision_loss)
        tf.summary.scalar('title_recall_loss', title_bio_recall_loss)
        tf.summary.scalar('content_recall_loss', content_bio_recall_loss)

        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params.learning_rate,
            optimizer="Adam")

        if mode == tf.estimator.ModeKeys.EVAL:
            # All six metrics reuse tf.metrics.accuracy with different weight
            # masks (full length / predicted non-O / annotated non-O).
            title_accuracy = tf.metrics.accuracy(
                labels.title_bio, title_bio_predictions, title_masks.length, name='title_accuracy')
            content_accuracy = tf.metrics.accuracy(
                labels.content_bio, content_bio_predictions, content_masks.length, name='content_accuracy')
            title_precision = tf.metrics.accuracy(
                labels.title_bio, title_bio_predictions, title_masks.predicted_tokens, name='title_precision')
            content_precision = tf.metrics.accuracy(
                labels.content_bio, content_bio_predictions, content_masks.predicted_tokens, name='content_precision')
            title_recall = tf.metrics.accuracy(
                labels.title_bio, title_bio_predictions, title_masks.annotated_tokens, name='title_recall')
            content_recall = tf.metrics.accuracy(
                labels.content_bio, content_bio_predictions, content_masks.annotated_tokens, name='content_recall')
            eval_metric_ops = {
                'title_accuracy': title_accuracy,
                'content_accuracy': content_accuracy,
                'title_precision': title_precision,
                'content_precision': content_precision,
                'title_recall': title_recall,
                'content_recall': content_recall
            }

    predictions = {
        'id': features.id,
        'title': features.title,
        'title_length': features.title_length,
        'title_bio': title_bio_predictions,
        'content': features.content,
        'content_length': features.content_length,
        'content_bio': content_bio_predictions
    }

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)


class Masks:
    """Float weight masks derived from labels/predictions.

    ``length``: 1.0 inside the sequence, 0.0 on padding.
    ``annotated_tokens``: 1.0 where the label tag is non-O (BIO tag > 0).
    ``predicted_tokens``: 1.0 where the predicted tag is non-O.
    """

    def __init__(self, tokens_bio: tf.Tensor, bio_predictions: tf.Tensor, length: tf.Tensor):
        self.length = tf.sequence_mask(length, tf.reduce_max(length), tf.float32)
        self.annotated_tokens = tf.cast(tf.greater(tokens_bio, 0), tf.float32)
        self.predicted_tokens = tf.cast(tf.greater(bio_predictions, 0), tf.float32)


class RNNLayer:
    """Bidirectional GRU layer; exposes summed forward/backward outputs and state."""

    def __init__(self, inputs: tf.Tensor, inputs_lengths: tf.Tensor, num_hidden: int,
                 initial_states: tuple = None):
        fw_cell = tf.nn.rnn_cell.GRUCell(num_hidden, activation=tf.nn.tanh)
        bw_cell = tf.nn.rnn_cell.GRUCell(num_hidden, activation=tf.nn.tanh)
        if initial_states is not None:
            fw_initial_state, bw_initial_state = initial_states
        else:
            fw_initial_state, bw_initial_state = None, None
        self.outputs_tuple, self.final_states_tuple = tf.nn.bidirectional_dynamic_rnn(
            fw_cell, bw_cell, inputs, inputs_lengths,
            initial_state_fw=fw_initial_state,
            initial_state_bw=bw_initial_state,
            dtype=tf.float32)

    @property
    def outputs(self):
        # Sum (not concat) of forward and backward per-step outputs.
        return tf.reduce_sum(self.outputs_tuple, axis=0)

    @property
    def final_state(self):
        # Sum of forward and backward final states.
        return tf.reduce_sum(self.final_states_tuple, axis=0)


def softsign_glu(values: tf.Tensor, gate_values: tf.Tensor):
    """Gated linear unit using softsign as the gate nonlinearity."""
    with tf.name_scope("softsign_glu"):
        return tf.nn.softsign(gate_values) * values
{ "content_hash": "f9963029838306d290f5914e3636ad51", "timestamp": "", "source": "github", "line_count": 212, "max_line_length": 116, "avg_line_length": 37.60377358490566, "alnum_prop": 0.6115153035624686, "repo_name": "pkubik/setags", "id": "0fed00a141617109a61ae5ea7376c527e8696fa0", "size": "7972", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setags/model.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "42955" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler

try:
    from ansible.modules.network.fortios import fortios_firewall_vip
except ImportError:
    pytest.skip("Could not load required modules for testing", allow_module_level=True)


@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the module's Connection class for every test in this file."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_vip.Connection')
    return connection_class_mock


fos_instance = FortiOSHandler(connection_mock)


# Canonical `firewall_vip` module arguments (Ansible underscore style).
# Previously this literal was copy-pasted into every test; it is now shared
# and each test derives its input/expected payload from it.
_FIREWALL_VIP_ARGS = {
    'arp_reply': 'disable',
    'color': '4',
    'comment': 'Comment.',
    'dns_mapping_ttl': '6',
    'extintf': 'test_value_7',
    'extip': 'test_value_8',
    'extport': 'test_value_9',
    'gratuitous_arp_interval': '10',
    'http_cookie_age': '11',
    'http_cookie_domain': 'test_value_12',
    'http_cookie_domain_from_host': 'disable',
    'http_cookie_generation': '14',
    'http_cookie_path': 'test_value_15',
    'http_cookie_share': 'disable',
    'http_ip_header': 'enable',
    'http_ip_header_name': 'test_value_18',
    'http_multiplex': 'enable',
    'https_cookie_secure': 'disable',
    'id': '21',
    'ldb_method': 'static',
    'mapped_addr': 'test_value_23',
    'mappedport': 'test_value_24',
    'max_embryonic_connections': '25',
    'name': 'default_name_26',
    'nat_source_vip': 'disable',
    'outlook_web_access': 'disable',
    'persistence': 'none',
    'portforward': 'disable',
    'portmapping_type': '1-to-1',
    'protocol': 'tcp',
    'server_type': 'http',
    'ssl_algorithm': 'high',
    'ssl_certificate': 'test_value_35',
    'ssl_client_fallback': 'disable',
    'ssl_client_renegotiation': 'allow',
    'ssl_client_session_state_max': '38',
    'ssl_client_session_state_timeout': '39',
    'ssl_client_session_state_type': 'disable',
    'ssl_dh_bits': '768',
    'ssl_hpkp': 'disable',
    'ssl_hpkp_age': '43',
    'ssl_hpkp_backup': 'test_value_44',
    'ssl_hpkp_include_subdomains': 'disable',
    'ssl_hpkp_primary': 'test_value_46',
    'ssl_hpkp_report_uri': 'test_value_47',
    'ssl_hsts': 'disable',
    'ssl_hsts_age': '49',
    'ssl_hsts_include_subdomains': 'disable',
    'ssl_http_location_conversion': 'enable',
    'ssl_http_match_host': 'enable',
    'ssl_max_version': 'ssl-3.0',
    'ssl_min_version': 'ssl-3.0',
    'ssl_mode': 'half',
    'ssl_pfs': 'require',
    'ssl_send_empty_frags': 'enable',
    'ssl_server_algorithm': 'high',
    'ssl_server_max_version': 'ssl-3.0',
    'ssl_server_min_version': 'ssl-3.0',
    'ssl_server_session_state_max': '61',
    'ssl_server_session_state_timeout': '62',
    'ssl_server_session_state_type': 'disable',
    'type': 'static-nat',
    'uuid': 'test_value_65',
    'weblogic_server': 'disable',
    'websphere_server': 'disable',
}


def _module_input(state, extra_args=None):
    """Build the playbook-style input dict for the module.

    A fresh shallow copy is returned per call so a test (or the module under
    test) mutating it cannot leak into other tests.
    """
    args = dict(_FIREWALL_VIP_ARGS)
    if extra_args:
        args.update(extra_args)
    return {
        'username': 'admin',
        'state': state,
        'firewall_vip': args,
        'vdom': 'root'}


def _expected_api_data():
    """Expected FortiOS API payload: same values, hyphenated key style."""
    return {key.replace('_', '-'): value for key, value in _FIREWALL_VIP_ARGS.items()}


def test_firewall_vip_creation(mocker):
    """state=present with a successful set() reports a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    is_error, changed, response = fortios_firewall_vip.fortios_firewall(
        _module_input('present'), fos_instance)

    set_method_mock.assert_called_with('firewall', 'vip', data=_expected_api_data(), vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200


def test_firewall_vip_creation_fails(mocker):
    """state=present with a failing set() surfaces the error and no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    is_error, changed, response = fortios_firewall_vip.fortios_firewall(
        _module_input('present'), fos_instance)

    set_method_mock.assert_called_with('firewall', 'vip', data=_expected_api_data(), vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500


def test_firewall_vip_removal(mocker):
    """state=absent with a successful delete() reports a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
                                      return_value=delete_method_result)

    is_error, changed, response = fortios_firewall_vip.fortios_firewall(
        _module_input('absent'), fos_instance)

    delete_method_mock.assert_called_with('firewall', 'vip', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200


def test_firewall_vip_deletion_fails(mocker):
    """state=absent with a failing delete() surfaces the error and no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
                                      return_value=delete_method_result)

    is_error, changed, response = fortios_firewall_vip.fortios_firewall(
        _module_input('absent'), fos_instance)

    delete_method_mock.assert_called_with('firewall', 'vip', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500


def test_firewall_vip_idempotent(mocker):
    """A 404 on DELETE semantics means the object already matches: no change, no error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    is_error, changed, response = fortios_firewall_vip.fortios_firewall(
        _module_input('present'), fos_instance)

    set_method_mock.assert_called_with('firewall', 'vip', data=_expected_api_data(), vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404


def test_firewall_vip_filter_foreign_attributes(mocker):
    """Attributes not in the module schema are stripped before the API call."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    # The foreign key must not appear in the payload sent to the API.
    input_data = _module_input('present', extra_args={'random_attribute_not_valid': 'tag'})

    is_error, changed, response = fortios_firewall_vip.fortios_firewall(input_data, fos_instance)

    set_method_mock.assert_called_with('firewall', 'vip', data=_expected_api_data(), vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
{ "content_hash": "ebcb33cc0e81a068889eb70ff96ede68", "timestamp": "", "source": "github", "line_count": 823, "max_line_length": 142, "avg_line_length": 41.41555285540705, "alnum_prop": 0.5083467801085522, "repo_name": "thaim/ansible", "id": "a24bdd6b8f39036340e0094a41bbf938bab34622", "size": "34781", "binary": false, "copies": "20", "ref": "refs/heads/fix-broken-link", "path": "test/units/modules/network/fortios/test_fortios_firewall_vip.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7" }, { "name": "Shell", "bytes": "246" } ], "symlink_target": "" }
from __future__ import division from __future__ import unicode_literals import os import codecs import json import logging import tweepy import time import re import random import cPickle as pickle from httplib import IncompleteRead def ignore(method): """ Use the @ignore decorator on TwitterBot methods you wish to leave unimplemented, such as on_timeline and on_mention. """ method.not_implemented = True return method class TwitterBot: def __init__(self): self.config = {} self.custom_handlers = [] self.config['reply_direct_mention_only'] = False self.config['reply_followers_only'] = True self.config['autofav_mentions'] = False self.config['autofav_keywords'] = [] self.config['autofollow'] = False self.config['tweet_interval'] = 30 * 60 self.config['tweet_interval_range'] = None self.config['reply_interval'] = 10 self.config['reply_interval_range'] = None self.config['ignore_timeline_mentions'] = True self.config['logging_level'] = logging.DEBUG self.config['storage'] = FileStorage() self.state = {} # call the custom initialization self.bot_init() auth = tweepy.OAuthHandler(self.config['api_key'], self.config['api_secret']) auth.set_access_token(self.config['access_key'], self.config['access_secret']) self.api = tweepy.API(auth) self.id = self.api.me().id self.screen_name = self.api.me().screen_name logging.basicConfig(format='%(asctime)s | %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=self.config['log_path'] + self.screen_name + '.log', level=self.config['logging_level']) logging.info('Initializing bot...') try: with self.config['storage'].read(self.screen_name) as f: self.state = pickle.load(f) except IOError: self.state['last_timeline_id'] = 1 self.state['last_mention_id'] = 1 self.state['last_timeline_time'] = 0 self.state['last_mention_time'] = 0 self.state['last_tweet_id'] = 1 self.state['last_tweet_time'] = 1 self.state['last_reply_id'] = 0 self.state['last_reply_time'] = 0 self.state['recent_timeline'] = [] self.state['mention_queue'] 
= [] self.state['friends'] = self.api.friends_ids(self.id) self.state['followers'] = self.api.followers_ids(self.id) self.state['new_followers'] = [] self.state['last_follow_check'] = 0 logging.info('Bot initialized!') def bot_init(self): """ Initialize custom state values for your bot. """ raise NotImplementedError("You MUST have bot_init() implemented in your bot! What have you DONE!") def log(self, message, level=logging.INFO): if level == logging.ERROR: logging.error(message) else: logging.info(message) def _log_tweepy_error(self, message, e): try: e_message = e.message[0]['message'] code = e.message[0]['code'] self.log("{}: {} ({})".format(message, e_message, code), level=logging.ERROR) except: self.log(message, e) def _tweet_url(self, tweet): return "http://twitter.com/" + tweet.author.screen_name + "/status/" + str(tweet.id) def _save_state(self): with self.config['storage'].write(self.screen_name) as f: pickle.dump(self.state, f) self.log('Bot state saved') def on_scheduled_tweet(self): """ Post a general tweet to own timeline. """ #self.post_tweet(text) raise NotImplementedError("You need to implement this to tweet to timeline (or pass if you don't want to)!") def on_mention(self, tweet, prefix): """ Perform some action upon receiving a mention. """ #self.post_tweet(text) raise NotImplementedError("You need to implement this to reply to/fav mentions (or pass if you don't want to)!") def on_timeline(self, tweet, prefix): """ Perform some action on a tweet on the timeline. """ #self.post_tweet(text) raise NotImplementedError("You need to implement this to reply to/fav timeline tweets (or pass if you don't want to)!") def on_follow(self, f_id): """ Perform some action when followed. 
""" if self.config['autofollow']: try: self.api.create_friendship(f_id, follow=True) self.state['friends'].append(f_id) logging.info('Followed user id {}'.format(f_id)) except tweepy.TweepError as e: self._log_tweepy_error('Unable to follow user', e) time.sleep(3) self.state['followers'].append(f_id) def post_tweet(self, text, reply_to=None, media=None): kwargs = {} args = [text] if media is not None: cmd = self.api.update_with_media args.insert(0, media) else: cmd = self.api.update_status try: self.log('Tweeting "{}"'.format(text)) if reply_to: self.log("-- Responding to status {}".format(self._tweet_url(reply_to))) kwargs['in_reply_to_status_id'] = reply_to.id else: self.log("-- Posting to own timeline") tweet = cmd(*args, **kwargs) self.log('Status posted at {}'.format(self._tweet_url(tweet))) return True except tweepy.TweepError as e: self._log_tweepy_error('Can\'t post status', e) return False def favorite_tweet(self, tweet): try: logging.info('Faving ' + self._tweet_url(tweet)) self.api.create_favorite(tweet.id) except tweepy.TweepError as e: self._log_tweepy_error('Can\'t fav status', e) def _ignore_method(self, method): return hasattr(method, 'not_implemented') and method.not_implemented def _handle_timeline(self): """ Reads the latest tweets in the bots timeline and perform some action. self.recent_timeline """ for tweet in self.state['recent_timeline']: prefix = self.get_mention_prefix(tweet) self.on_timeline(tweet, prefix) words = tweet.text.lower().split() if any(w in words for w in self.config['autofav_keywords']): self.favorite_tweet(tweet) time.sleep(self.config['reply_interval']) def _handle_mentions(self): """ Performs some action on the mentions in self.mention_queue """ # TODO: only handle a certain number of mentions at a time? 
for mention in iter(self.state['mention_queue']): prefix = self.get_mention_prefix(mention) self.on_mention(mention, prefix) self.state['mention_queue'].remove(mention) if self.config['autofav_mentions']: self.favorite_tweet(mention) #time.sleep(self.config['reply_interval']) def get_mention_prefix(self, tweet): """ Returns a string of users to @-mention when responding to a tweet. """ mention_back = ['@' + tweet.author.screen_name] mention_back += [s for s in re.split('[^@\w]', tweet.text) if len(s) > 2 and s[0] == '@' and s[1:] != self.screen_name] if self.config['reply_followers_only']: mention_back = [s for s in mention_back if s[1:] in self.state['followers'] or s == '@' + tweet.author.screen_name] return ' '.join(mention_back) def _check_mentions(self): """ Checks mentions and loads most recent tweets into the mention queue """ if self._ignore_method(self.on_mention): logging.debug('Ignoring mentions') return try: current_mentions = self.api.mentions_timeline(since_id=self.state['last_mention_id'], count=100) # direct mentions only? 
if self.config['reply_direct_mention_only']: current_mentions = [t for t in current_mentions if re.split('[^@\w]', t.text)[0] == '@' + self.screen_name] if len(current_mentions) != 0: self.state['last_mention_id'] = current_mentions[0].id self.state['last_mention_time'] = time.time() self.state['mention_queue'] += reversed(current_mentions) logging.info('Mentions updated ({} retrieved, {} total in queue)'.format(len(current_mentions), len(self.state['mention_queue']))) except tweepy.TweepError as e: self._log_tweepy_error('Can\'t retrieve mentions', e) except IncompleteRead as e: self.log('Incomplete read error -- skipping mentions update') def _check_timeline(self): """ Checks timeline and loads most recent tweets into recent timeline """ if self._ignore_method(self.on_timeline): logging.debug('Ignoring timeline') return try: current_timeline = self.api.home_timeline(count=200, since_id=self.state['last_timeline_id']) # remove my tweets current_timeline = [t for t in current_timeline if t.author.screen_name.lower() != self.screen_name.lower()] # remove all tweets mentioning me current_timeline = [t for t in current_timeline if not re.search('@'+self.screen_name, t.text, flags=re.IGNORECASE)] if self.config['ignore_timeline_mentions']: # remove all tweets with mentions (heuristically) current_timeline = [t for t in current_timeline if '@' not in t.text] if len(current_timeline) != 0: self.state['last_timeline_id'] = current_timeline[0].id self.state['last_timeline_time'] = time.time() self.state['recent_timeline'] = list(reversed(current_timeline)) logging.info('Timeline updated ({} retrieved)'.format(len(current_timeline))) except tweepy.TweepError as e: self._log_tweepy_error('Can\'t retrieve timeline', e) except IncompleteRead as e: self.log('Incomplete read error -- skipping timeline update') def _check_followers(self): """ Checks followers. 
""" logging.info("Checking for new followers...") try: self.state['new_followers'] = [f_id for f_id in self.api.followers_ids(self.id) if f_id not in self.state['followers']] self.config['last_follow_check'] = time.time() except tweepy.TweepError as e: self._log_tweepy_error('Can\'t update followers', e) except IncompleteRead as e: self.log('Incomplete read error -- skipping followers update') def _handle_followers(self): """ Handles new followers. """ for f_id in self.state['new_followers']: self.on_follow(f_id) def register_custom_handler(self, action, interval): """ Register a custom action to run at some interval. """ handler = {} handler['action'] = action handler['interval'] = interval handler['last_run'] = 0 self.custom_handlers.append(handler) def run(self): """ Runs the bot! This probably shouldn't be in a "while True" lol. """ while True: # check followers every 30 minutes if (time.time() - self.state['last_follow_check']) > (30 * 60): self._check_followers() self._handle_followers() # check mentions every minute-ish #if self.reply_to_mentions and (time.time() - self.last_mention_time) > 60: if (time.time() - self.state['last_mention_time']) > 60: self._check_mentions() self._handle_mentions() # tweet to timeline #if self.reply_to_timeline and (time.time() - self.last_mention_time) > 60: if (time.time() - self.state['last_timeline_time']) > 60: self._check_timeline() self._handle_timeline() # tweet to timeline on the correct interval if (time.time() - self.state['last_tweet_time']) > self.config['tweet_interval']: self.on_scheduled_tweet() # TODO: maybe this should only run if the above is successful... 
if self.config['tweet_interval_range'] is not None: self.config['tweet_interval'] = random.randint(*self.config['tweet_interval_range']) self.log("Next tweet in {} seconds".format(self.config['tweet_interval'])) self.state['last_tweet_time'] = time.time() # run custom action for handler in self.custom_handlers: if (time.time() - handler['last_run']) > handler['interval']: handler['action']() handler['last_run'] = time.time() # save current state self._save_state() logging.info("Sleeping for a bit...") time.sleep(30) class FileStorage(object): """ Default storage adapter. Adapters must implement two methods: read(name) and write(name). """ def read(self, name): """ Return an IO-like object that will produce binary data when read from. If nothing is stored under the given name, raise IOError. """ filename = self._get_filename(name) if os.path.exists(filename): logging.debug("Reading from {}".format(filename)) else: logging.debug("{} doesn't exist".format(filename)) return open(filename) def write(self, name): """ Return an IO-like object that will store binary data written to it. """ filename = self._get_filename(name) if os.path.exists(filename): logging.debug("Overwriting {}".format(filename)) else: logging.debug("Creating {}".format(filename)) return open(filename, 'wb') def _get_filename(self, name): return '{}_state.pkl'.format(name)
{ "content_hash": "e5b2ae786826ac6bb17d3227135fe924", "timestamp": "", "source": "github", "line_count": 443, "max_line_length": 142, "avg_line_length": 32.830699774266364, "alnum_prop": 0.570475797579758, "repo_name": "brianshumate/robo-pirate", "id": "2a5dc92c193c84d18a38a70d3bd491357b985972", "size": "14614", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "twitterbot/bot.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "18785" } ], "symlink_target": "" }
import unittest from cpuinfo import * import helpers class TestParseCPUString(unittest.TestCase): def test_to_decimal_string(self): self.assertEqual('2.8', cpuinfo._to_decimal_string('2.80')) self.assertEqual('2.0', cpuinfo._to_decimal_string('2')) self.assertEqual('3.0', cpuinfo._to_decimal_string(3)) self.assertEqual('6.5', cpuinfo._to_decimal_string(6.5)) self.assertEqual('7.002', cpuinfo._to_decimal_string(7.002)) self.assertEqual('4.00000000001', cpuinfo._to_decimal_string('4.00000000001')) self.assertEqual('5.0', cpuinfo._to_decimal_string('5.000000000000')) self.assertEqual('0.0', cpuinfo._to_decimal_string('invalid')) self.assertEqual('0.0', cpuinfo._to_decimal_string('8.778.9')) self.assertEqual('0.0', cpuinfo._to_decimal_string('')) self.assertEqual('0.0', cpuinfo._to_decimal_string(None)) def test_hz_short_to_full(self): self.assertEqual((2800000000, 0), cpuinfo._hz_short_to_full('2.8', 9)) self.assertEqual((1200000, 0), cpuinfo._hz_short_to_full('1.2', 6)) self.assertEqual((3200000000, 0), cpuinfo._hz_short_to_full('3.2', 9)) self.assertEqual((9001200000, 0), cpuinfo._hz_short_to_full('9001.2', 6)) self.assertEqual((0, 0), cpuinfo._hz_short_to_full('0.0', 0)) self.assertEqual((2, 87), cpuinfo._hz_short_to_full('2.87', 0)) self.assertEqual((0, 0), cpuinfo._hz_short_to_full('invalid', 0)) self.assertEqual((0, 0), cpuinfo._hz_short_to_full('8.778.9', 0)) self.assertEqual((0, 0), cpuinfo._hz_short_to_full('', 0)) self.assertEqual((0, 0), cpuinfo._hz_short_to_full(None, 0)) def test_hz_friendly_to_full(self): self.assertEqual((2800000000, 0), cpuinfo._hz_friendly_to_full('2.80GHz')) self.assertEqual((1200000, 0), cpuinfo._hz_friendly_to_full('1.20 mHz')) self.assertEqual((3693150000, 0), cpuinfo._hz_friendly_to_full('3693.15-MHz')) self.assertEqual((12000000000, 0), cpuinfo._hz_friendly_to_full('12 GHz')) self.assertEqual((2, 6), cpuinfo._hz_friendly_to_full('2.6 Hz')) self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full('0 Hz')) self.assertEqual((0, 0), 
cpuinfo._hz_friendly_to_full('invalid')) self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full('8.778.9')) self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full('')) self.assertEqual((0, 0), cpuinfo._hz_friendly_to_full(None)) def test_hz_short_to_friendly(self): self.assertEqual('2.8000 GHz', cpuinfo._hz_short_to_friendly('2.8', 9)) self.assertEqual('1.2000 MHz', cpuinfo._hz_short_to_friendly('1.2', 6)) self.assertEqual('3.2000 GHz', cpuinfo._hz_short_to_friendly('3.2', 9)) self.assertEqual('1.3000 Hz', cpuinfo._hz_short_to_friendly('1.3', 0)) self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('0.0', 0)) self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('invalid', 0)) self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('8.778.9', 0)) self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly('', 0)) self.assertEqual('0.0000 Hz', cpuinfo._hz_short_to_friendly(None, 0)) def test_parse_cpu_brand_string(self): hz, scale = cpuinfo._parse_cpu_brand_string('Intel(R) Pentium(R) CPU G640 @ 2.80GHz') self.assertEqual((hz, scale), ('2.8', 9)) hz, scale = cpuinfo._parse_cpu_brand_string('Intel(R) Pentium(R) CPU @ 1.20MHz') self.assertEqual((hz, scale), ('1.2', 6)) # NOTE: No @ symbol hz, scale = cpuinfo._parse_cpu_brand_string('Intel(R) Pentium(R) D CPU 3.20GHz') self.assertEqual((hz, scale), ('3.2', 9)) # NOTE: No @ symbol and no Hz hz, scale = cpuinfo._parse_cpu_brand_string('AMD Ryzen 7 2700X Eight-Core Processor') self.assertEqual((hz, scale), ('0.0', 0)) def test_parse_cpu_brand_string_dx(self): hz, scale, brand, vendor_id, stepping, model, family = \ cpuinfo._parse_cpu_brand_string_dx("Intel(R) Pentium(R) CPU G640 @ 2.80GHz (fam: 06, model: 2a, stepping: 07)") self.assertEqual('Intel(R) Pentium(R) CPU G640 @ 2.80GHz', brand) self.assertEqual((hz, scale), ('2.8', 9)) self.assertEqual((vendor_id, stepping, model, family), (None, 7, 42, 6)) hz, scale, brand, vendor_id, stepping, model, family = \ cpuinfo._parse_cpu_brand_string_dx("Intel(R) 
Pentium(R) CPU G640 @ 2.80GHz (family: 0x6, model: 0x2a, stepping: 0x7)") self.assertEqual('Intel(R) Pentium(R) CPU G640 @ 2.80GHz', brand) self.assertEqual((hz, scale), ('2.8', 9)) self.assertEqual((vendor_id, stepping, model, family), (None, 7, 42, 6)) hz, scale, brand, vendor_id, stepping, model, family = \ cpuinfo._parse_cpu_brand_string_dx("Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz") self.assertEqual("Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz", brand) self.assertEqual((hz, scale), ('2.93', 9)) self.assertEqual((vendor_id, stepping, model, family), (None, None, None, None)) hz, scale, brand, vendor_id, stepping, model, family = \ cpuinfo._parse_cpu_brand_string_dx("Intel(R) Pentium(R) CPU G640 @ 2.80GHz (2793.73-MHz K8-class CPU)") self.assertEqual("Intel(R) Pentium(R) CPU G640 @ 2.80GHz", brand) self.assertEqual((hz, scale), ('2.8', 9)) self.assertEqual((vendor_id, stepping, model, family), (None, None, None, None)) # NOTE: No @ symbol hz, scale, brand, vendor_id, stepping, model, family = \ cpuinfo._parse_cpu_brand_string_dx("Intel(R) Pentium(R) D CPU 3.20GHz") self.assertEqual("Intel(R) Pentium(R) D CPU 3.20GHz", brand) self.assertEqual((hz, scale), ('3.2', 9)) self.assertEqual((vendor_id, stepping, model, family), (None, None, None, None)) # NOTE: No @ symbol and no Hz hz, scale, brand, vendor_id, stepping, model, family = \ cpuinfo._parse_cpu_brand_string_dx("AMD Ryzen 7 2700X Eight-Core Processor (3693.15-MHz K8-class CPU) (fam: 06, model: 2a, stepping: 07)") self.assertEqual("AMD Ryzen 7 2700X Eight-Core Processor", brand) self.assertEqual((hz, scale), ('3693.15', 6)) self.assertEqual((vendor_id, stepping, model, family), (None, 7, 42, 6))
{ "content_hash": "a103cc913a58e2d089fbdcbaa384dc54", "timestamp": "", "source": "github", "line_count": 113, "max_line_length": 149, "avg_line_length": 52.06194690265487, "alnum_prop": 0.6734659187489376, "repo_name": "workhorsy/py-cpuinfo", "id": "27fdfd1313f911154ad1f2745cfea31aa21ec6f7", "size": "5885", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_parse_cpu_string.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1860" }, { "name": "Python", "bytes": "569052" } ], "symlink_target": "" }
from sklearn.datasets import load_svmlight_file from os import listdir from os.path import isfile, join, basename, dirname from sklearn.linear_model import SGDClassifier from sklearn.svm import LinearSVC from scipy.linalg import svd from scipy.sparse import lil_matrix import numpy as np import pickle import sys from uda_common import read_pivots, find_best_c def main(args): if len(args) < 2: sys.stderr.write("Two required arguments: <data directory> <output file>\n\n") sys.exit(-1) data_dir = args[0] base_dir = dirname(data_dir) short_dir = basename(data_dir) ## data dir is pivot_name_pivots_done so we need to chop off the end: pivot_name = short_dir[:-11] pivot_logfile = join(dirname(data_dir), pivot_name + '_pivots_done.txt') sys.stderr.write("Reading input file names from %s\n" % pivot_logfile) files = [] f = open(pivot_logfile, 'r') for line in f: line = line.rstrip() files.append(line[13:]) f.close() ## Read the base data file so we get an absolute count of features: all_X, _ = load_svmlight_file(join(base_dir, 'training-data_reduced.liblinear0')) num_feats = all_X.shape[1] #files = [join(data_dir,f) for f in listdir(data_dir) if f.endswith("liblinear")] weight_matrix = None for ind,f in enumerate(files): sys.stderr.write("Loading file %s for classification\n" % (f)) ## Since the script that created these idd not have domain index ## variables we don't need to worry about them here X_train, y_train = load_svmlight_file(f, n_features=num_feats) prevalence = y_train.sum() ## Weight matrix is supposed to be n x p, n non-pivot features by p pivot features ## Here we just zeroed out all the pivot features in the pre-process, so we ## will actually have m x p but with <=n non-zero features. 
if weight_matrix is None: # num_feats = X_train.shape[1] weight_matrix = np.zeros((num_feats, len(files)), dtype=np.float16) clf = SGDClassifier(loss="modified_huber", fit_intercept=False, random_state=718, max_iter=50, alpha=0.1) # clf = LinearSVC(fit_intercept=False) best_c, best_score = find_best_c(X_train, y_train, C_list=[0.1,1], pos_label=1) sys.stderr.write(' Best F score for predicting this feature is %f with prevalence %f\n' % (best_score, prevalence)) clf.fit(X_train, y_train) coefs_out = open(join(data_dir, basename(f).replace('liblinear','model') ), 'wb') pickle.dump(clf, coefs_out) coefs_out.close() weight_matrix[:,ind] = clf.coef_ neg_inds = np.where(weight_matrix < 0) weight_matrix[neg_inds] = 0 sys.stderr.write('Writing full theta matrix\n') full_out = open(args[1], 'wb') pickle.dump(weight_matrix, full_out) full_out.close() if __name__ == '__main__': args = sys.argv[1:] main(args)
{ "content_hash": "06fea4ff952d7ad4b24be3dbd477454b", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 123, "avg_line_length": 40.94444444444444, "alnum_prop": 0.6485753052917232, "repo_name": "tmills/uda", "id": "6efc59da7d96a9109e08b73628d777e778d23880", "size": "2966", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/learn_scl_weights.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "4051" }, { "name": "Python", "bytes": "226010" }, { "name": "Shell", "bytes": "701" } ], "symlink_target": "" }
import collections import json import logging import time import urllib.request, urllib.parse, urllib.error import urllib.request, urllib.error, urllib.parse from base.APIError import APIError from base.APIHostSwitch import * _MAX_BACKOFF_DELAY = 1024000 class JsonDict(dict): def __getattr__(self, item): try: return self[item] except KeyError: raise AttributeError(r"'JsonDict' object has no attribute %s'" % item) def __setattr__(self, key, value): self[key] = value def _parse_json(body): """ convert json object to python object :param body: response data """ def _obj_hook(pairs): o = JsonDict() for k, v in pairs.items(): o[str(k)] = v return o return json.loads(body, object_hook=_obj_hook) def _build_request_url(server, request_path): return Constants.http_protocol + "://" + server.host + request_path[0] def _http_call(url, method, authorization, token, **kw): """ :param url: http request url :param method: http request method :param authorization: push authorization :param kw: params """ params = urllib.parse.urlencode(_encode_params(**kw)).encode('utf-8') http_url = '%s?%s' % (url, params) if method == Constants.__HTTP_GET__ else url http_body = None if method == Constants.__HTTP_GET__ else params req = urllib.request.Request(http_url, data=http_body) if authorization: req.add_header('Authorization', 'key=%s' % authorization) if token: req.add_header('X-PUSH-AUDIT-TOKEN', token) if Constants.auto_switch_host and ServerSwitch().need_refresh_host_list(): req.add_header('X-PUSH-HOST-LIST', 'true') req.add_header('Content-Type', 'application/x-www-form-urlencoded;charset=UTF-8') try: resp = urllib.request.urlopen(req, timeout=5) headers = resp.getheaders() host_list = None for header in headers: if header[0] == 'X-PUSH-HOST-LIST': host_list = header[1] if host_list: ServerSwitch().initialize(host_list) r = _parse_json(resp.read().decode()) if hasattr(r, 'code'): if r.code != 0: raise APIError(r.code, r.get('description', ''), r.get('reason', '')) return r except 
urllib.error.URLError as e: raise APIError('-5', e.read(), 'http error') def _encode_params(**kw): args = {} for k, v in kw.items(): if isinstance(v, str): qv = v.encode('utf-8') if isinstance(v, str) else v args['%s' % k] = qv elif isinstance(v, collections.Iterable): for i in v: qv = i.encode('utf-8') if isinstance(i, str) else str(i) args['%s' % k] = qv else: qv = str(v) args['%s' % k] = qv return args class Base(object): def __init__(self, security, token=None): self.security = security self.token = token self.proxy_ip = None self.proxy_port = None self.proxy = False def set_proxy(self, proxy_ip, proxy_port): self.proxy_ip = proxy_ip self.proxy_port = proxy_port self.proxy = True def set_token(self, token): self.token = token def _call_request(self, request_path, method, **kw): """ call http request(include auto select server) :param request_path: http interface :param method: GET|POST :param kw: params """ start = time.time() server = ServerSwitch().select_server(request_path) self.build_proxy() request_url = _build_request_url(server, request_path) try: ret = _http_call(request_url, method, self.security, self.token, **kw) if time.time() - start > 5: server.decr_priority() else: server.incr_priority() return ret except APIError as ex: logging.error("%s request: [%s] error [%s]" % (Constants.http_protocol, request_url, ex)) server.decr_priority() raise ex def http_post(self, request_path, **kw): logging.info("POST %s" % request_path[0]) return self._call_request(request_path, Constants.__HTTP_POST__, **kw) def http_get(self, request_path, **kw): logging.info("GET %s" % request_path[0]) return self._call_request(request_path, Constants.__HTTP_GET__, **kw) def build_proxy(self): if self.proxy: opener = urllib.request.build_opener(urllib.request.ProxyHandler({"%s:%s" % (self.proxy_ip, self.proxy_port)}), urllib.request.HTTPHandler(debuglevel=1)) urllib.request.install_opener(opener) def _try_http_request(self, request_path, retry_times, 
method=Constants.__HTTP_POST__, **kw): is_fail, try_time, result, sleep_time = True, 0, None, 1 while is_fail and try_time < retry_times: try: if method == Constants.__HTTP_POST__: result = self.http_post(request_path, **kw) elif method == Constants.__HTTP_GET__: result = self.http_get(request_path, **kw) else: raise APIError('-2', 'not support %s http request' % method, 'http error') is_fail = False except APIError as ex: ''' URLError failure retry ''' if ex.error_code == '-5': is_fail = True logging.error('code:[%s] - description:[%s] - reason:[%s]' % (ex.error_code, ex.error, ex.request)) try_time += 1 time.sleep(sleep_time) if 2 * sleep_time < _MAX_BACKOFF_DELAY: sleep_time *= 2 if not result: raise APIError('-3', 'retry %s time failure' % retry_times, 'request error') return result
{ "content_hash": "86dcb80cb493d85cfa2a4b933b2b66c9", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 123, "avg_line_length": 34.982658959537574, "alnum_prop": 0.5581625908790483, "repo_name": "jerryjobs/thirdpartPushSystem", "id": "7533ed6dbfb673c822a23bead02ff3c42a5c6f36", "size": "6052", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "push/mipush/build/lib/base/APISenderBase.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "82155" }, { "name": "HTML", "bytes": "5277" }, { "name": "Python", "bytes": "728813" }, { "name": "Shell", "bytes": "68" } ], "symlink_target": "" }
from django.db import models from django.utils.translation import ugettext_lazy as _ from generic import ConfigModel from bbotui import settings class BuildSlave(ConfigModel): """ A Build Slave """ project = models.ForeignKey("Project", verbose_name=_("project")) name = models.CharField(_("name"), max_length=50) description = models.CharField(_("description"), max_length=200, blank=True) password = models.CharField(_("password"), max_length=12) max_builds = models.IntegerField( _("max simultaneous builds"), help_text = _("0 for unlimitted"), default = getattr(settings, "DEFAULT_SLAVE_MAXBUILD"), ) timeout = models.IntegerField( _("timeout before slave considered MISSING"), help_text = _("in minutes"), default = getattr(settings, "DEFAULT_SLAVE_TIMEOUT"), ) # this defines who gets informed when slave goes missing # admins with receive_slave_events=True will also be informed admins = models.ManyToManyField("BuildAdmin", verbose_name = _("administrators"), ) class Meta: app_label = 'bbotui' verbose_name = _("build slave") verbose_name_plural = _("build slaves") # a project cannot have multiple slaves with the same name unique_together = (("name", "project")) def __unicode__(self): return self.name def get_config_type(self): return _("build slave") def get_config_class(self): return ("BuildSlave", "buildbot.buildslave") def get_config_args(self): notify_list = [] # add emails of admin subscribed specifically to this slave for admin in self.admins.all(): notify_list.append(str(admin.email)) # add admins that want to be notified on all slave events for admin in self.project.buildadmin_set.filter(receive_slave_events=True): email = str(admin.email) if email not in notify_list: notify_list.append(email) if notify_list: notify_list.sort() return { "name" : self.name, "password" : self.password, "missing_timeout" : self.timeout * 60, "max_builds" : self.max_builds, "notify_on_missing" : notify_list, }
{ "content_hash": "4d3599708870e5cb4832e6a97c1d644b", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 83, "avg_line_length": 34.38028169014085, "alnum_prop": 0.5854158131913151, "repo_name": "shawnchin/bbotui", "id": "4803b01a137e31b65c0c1d36ab70e14201684502", "size": "2441", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web/bbotui/models/buildslave.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "9829" }, { "name": "Python", "bytes": "218335" }, { "name": "Shell", "bytes": "1824" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function from unittest import TestCase, main import numpy as np import numpy.testing as npt import pandas as pd from pandas.util.testing import assert_frame_equal from skbio import DistanceMatrix from skbio.stats.distance import (DissimilarityMatrixError, DistanceMatrixError, mantel, pwmantel) from skbio.stats.distance._mantel import _order_dms from skbio.util import get_data_path class MantelTestData(TestCase): def setUp(self): # Small dataset of minimal size (3x3). Mix of floats and ints in a # native Python nested list structure. self.minx = [[0, 1, 2], [1, 0, 3], [2, 3, 0]] self.miny = [[0, 2, 7], [2, 0, 6], [7, 6, 0]] self.minz = [[0, 0.5, 0.25], [0.5, 0, 0.1], [0.25, 0.1, 0]] # Version of the above dataset stored as DistanceMatrix instances. self.minx_dm = DistanceMatrix(self.minx) self.miny_dm = DistanceMatrix(self.miny) self.minz_dm = DistanceMatrix(self.minz) # Versions of self.minx_dm and self.minz_dm that each have an extra ID # on the end. self.minx_dm_extra = DistanceMatrix([[0, 1, 2, 7], [1, 0, 3, 2], [2, 3, 0, 4], [7, 2, 4, 0]], ['0', '1', '2', 'foo']) self.minz_dm_extra = DistanceMatrix([[0, 0.5, 0.25, 3], [0.5, 0, 0.1, 24], [0.25, 0.1, 0, 5], [3, 24, 5, 0]], ['0', '1', '2', 'bar']) class MantelTests(MantelTestData): """Results were verified with R 3.1.0 and vegan 2.0-10 (vegan::mantel). vegan::mantel performs a one-sided (greater) test and does not have the option to specify different alternative hypotheses. In order to test the other alternative hypotheses, I modified vegan::mantel to perform the appropriate test, source()'d the file and verified the output. """ def setUp(self): super(MantelTests, self).setUp() self.methods = ('pearson', 'spearman') self.alternatives = ('two-sided', 'greater', 'less') # No variation in distances. Taken from Figure 10.20(b), pg. 603 in L&L # 3rd edition. Their example is 4x4 but using 3x3 here for easy # comparison to the minimal dataset above. 
self.no_variation = [[0, 0.667, 0.667], [0.667, 0, 0.667], [0.667, 0.667, 0]] # This second dataset is derived from vegan::mantel's example dataset. # The "veg" distance matrix contains Bray-Curtis distances derived from # the varespec data (named "veg.dist" in the example). The "env" # distance matrix contains Euclidean distances derived from scaled # varechem data (named "env.dist" in the example). self.veg_dm_vegan = np.loadtxt( get_data_path('mantel_veg_dm_vegan.txt')) self.env_dm_vegan = np.loadtxt( get_data_path('mantel_env_dm_vegan.txt')) # Expected test statistic when comparing x and y with method='pearson'. self.exp_x_vs_y = 0.7559289 # Expected test statistic when comparing x and z with method='pearson'. self.exp_x_vs_z = -0.9897433 def test_statistic_same_across_alternatives_and_permutations(self): # Varying permutations and alternative hypotheses shouldn't affect the # computed test statistics. for n in (0, 99, 999): for alt in self.alternatives: for method, exp in (('pearson', self.exp_x_vs_y), ('spearman', 0.5)): obs = mantel(self.minx, self.miny, method=method, permutations=n, alternative=alt)[0] self.assertAlmostEqual(obs, exp) def test_comparing_same_matrices(self): for method in self.methods: obs = mantel(self.minx, self.minx, method=method)[0] self.assertAlmostEqual(obs, 1) obs = mantel(self.miny, self.miny, method=method)[0] self.assertAlmostEqual(obs, 1) def test_negative_correlation(self): for method, exp in (('pearson', self.exp_x_vs_z), ('spearman', -1)): obs = mantel(self.minx, self.minz, method=method)[0] self.assertAlmostEqual(obs, exp) def test_zero_permutations(self): for alt in self.alternatives: for method, exp in (('pearson', self.exp_x_vs_y), ('spearman', 0.5)): obs = mantel(self.minx, self.miny, permutations=0, method=method, alternative=alt) self.assertAlmostEqual(obs[0], exp) npt.assert_equal(obs[1], np.nan) self.assertEqual(obs[2], 3) # swapping order of matrices should give same result obs = mantel(self.miny, self.minx, 
permutations=0, method=method, alternative=alt) self.assertAlmostEqual(obs[0], exp) npt.assert_equal(obs[1], np.nan) self.assertEqual(obs[2], 3) def test_distance_matrix_instances_as_input(self): # Matrices with all matching IDs in the same order. np.random.seed(0) obs = mantel(self.minx_dm, self.miny_dm, alternative='less') self.assertAlmostEqual(obs[0], self.exp_x_vs_y) self.assertAlmostEqual(obs[1], 0.843) self.assertEqual(obs[2], 3) def test_distance_matrix_instances_with_reordering_and_nonmatching(self): x = self.minx_dm_extra.filter(['1', '0', 'foo', '2']) y = self.miny_dm.filter(['0', '2', '1']) # strict=True should disallow IDs that aren't found in both matrices with self.assertRaises(ValueError): mantel(x, y, alternative='less', strict=True) np.random.seed(0) # strict=False should ignore IDs that aren't found in both matrices obs = mantel(x, y, alternative='less', strict=False) self.assertAlmostEqual(obs[0], self.exp_x_vs_y) self.assertAlmostEqual(obs[1], 0.843) self.assertEqual(obs[2], 3) def test_distance_matrix_instances_with_lookup(self): self.minx_dm.ids = ('a', 'b', 'c') self.miny_dm.ids = ('d', 'e', 'f') lookup = {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'A', 'e': 'B', 'f': 'C'} np.random.seed(0) obs = mantel(self.minx_dm, self.miny_dm, alternative='less', lookup=lookup) self.assertAlmostEqual(obs[0], self.exp_x_vs_y) self.assertAlmostEqual(obs[1], 0.843) self.assertEqual(obs[2], 3) def test_one_sided_greater(self): np.random.seed(0) obs = mantel(self.minx, self.miny, alternative='greater') self.assertAlmostEqual(obs[0], self.exp_x_vs_y) self.assertAlmostEqual(obs[1], 0.324) self.assertEqual(obs[2], 3) obs = mantel(self.minx, self.minx, alternative='greater') self.assertAlmostEqual(obs[0], 1) self.assertAlmostEqual(obs[1], 0.172) self.assertEqual(obs[2], 3) def test_one_sided_less(self): # no need to seed here as permuted test statistics will all be less # than or equal to the observed test statistic (1.0) for method in self.methods: obs = 
mantel(self.minx, self.minx, method=method, alternative='less') self.assertEqual(obs, (1, 1, 3)) np.random.seed(0) obs = mantel(self.minx, self.miny, alternative='less') self.assertAlmostEqual(obs[0], self.exp_x_vs_y) self.assertAlmostEqual(obs[1], 0.843) self.assertEqual(obs[2], 3) obs = mantel(self.minx, self.minz, alternative='less') self.assertAlmostEqual(obs[0], self.exp_x_vs_z) self.assertAlmostEqual(obs[1], 0.172) self.assertEqual(obs[2], 3) def test_two_sided(self): np.random.seed(0) obs = mantel(self.minx, self.minx, method='spearman', alternative='two-sided') self.assertEqual(obs[0], 1) self.assertAlmostEqual(obs[1], 0.328) self.assertEqual(obs[2], 3) obs = mantel(self.minx, self.miny, method='spearman', alternative='two-sided') self.assertAlmostEqual(obs[0], 0.5) self.assertAlmostEqual(obs[1], 1.0) self.assertEqual(obs[2], 3) obs = mantel(self.minx, self.minz, method='spearman', alternative='two-sided') self.assertAlmostEqual(obs[0], -1) self.assertAlmostEqual(obs[1], 0.322) self.assertEqual(obs[2], 3) def test_vegan_example(self): np.random.seed(0) # pearson obs = mantel(self.veg_dm_vegan, self.env_dm_vegan, alternative='greater') self.assertAlmostEqual(obs[0], 0.3047454) self.assertAlmostEqual(obs[1], 0.002) self.assertEqual(obs[2], 24) # spearman obs = mantel(self.veg_dm_vegan, self.env_dm_vegan, alternative='greater', method='spearman') self.assertAlmostEqual(obs[0], 0.283791) self.assertAlmostEqual(obs[1], 0.003) self.assertEqual(obs[2], 24) def test_no_variation_pearson(self): # Output doesn't match vegan::mantel with method='pearson'. 
Consider # revising output and this test depending on outcome of # https://github.com/scipy/scipy/issues/3728 for alt in self.alternatives: # test one or both inputs having no variation in their # distances obs = mantel(self.miny, self.no_variation, method='pearson', alternative=alt) npt.assert_equal(obs, (0.0, 1.0, 3)) obs = mantel(self.no_variation, self.miny, method='pearson', alternative=alt) npt.assert_equal(obs, (0.0, 1.0, 3)) obs = mantel(self.no_variation, self.no_variation, method='pearson', alternative=alt) npt.assert_equal(obs, (1.0, 1.0, 3)) def test_no_variation_spearman(self): exp = (np.nan, np.nan, 3) for alt in self.alternatives: obs = mantel(self.miny, self.no_variation, method='spearman', alternative=alt) npt.assert_equal(obs, exp) obs = mantel(self.no_variation, self.miny, method='spearman', alternative=alt) npt.assert_equal(obs, exp) obs = mantel(self.no_variation, self.no_variation, method='spearman', alternative=alt) npt.assert_equal(obs, exp) def test_no_side_effects(self): minx = np.asarray(self.minx, dtype='float') miny = np.asarray(self.miny, dtype='float') minx_copy = np.copy(minx) miny_copy = np.copy(miny) mantel(minx, miny) # Make sure we haven't modified the input. npt.assert_equal(minx, minx_copy) npt.assert_equal(miny, miny_copy) def test_invalid_distance_matrix(self): # Single asymmetric, non-hollow distance matrix. with self.assertRaises(DissimilarityMatrixError): mantel([[1, 2], [3, 4]], [[0, 0], [0, 0]]) # Two asymmetric distance matrices. 
with self.assertRaises(DistanceMatrixError): mantel([[0, 2], [3, 0]], [[0, 1], [0, 0]]) def test_invalid_input(self): # invalid correlation method with self.assertRaises(ValueError): mantel([[1]], [[1]], method='brofist') # invalid permutations with self.assertRaises(ValueError): mantel([[1]], [[1]], permutations=-1) # invalid alternative with self.assertRaises(ValueError): mantel([[1]], [[1]], alternative='no cog yay') # too small dms with self.assertRaises(ValueError): mantel([[0, 3], [3, 0]], [[0, 2], [2, 0]]) class PairwiseMantelTests(MantelTestData): def setUp(self): super(PairwiseMantelTests, self).setUp() self.min_dms = (self.minx_dm, self.miny_dm, self.minz_dm) self.exp_results_minimal = pd.read_csv( get_data_path('pwmantel_exp_results_minimal.txt'), sep='\t', index_col=(0, 1)) self.exp_results_minimal_with_labels = pd.read_csv( get_data_path('pwmantel_exp_results_minimal_with_labels.txt'), sep='\t', index_col=(0, 1)) self.exp_results_duplicate_dms = pd.read_csv( get_data_path('pwmantel_exp_results_duplicate_dms.txt'), sep='\t', index_col=(0, 1)) self.exp_results_na_p_value = pd.read_csv( get_data_path('pwmantel_exp_results_na_p_value.txt'), sep='\t', index_col=(0, 1)) self.exp_results_reordered_distance_matrices = pd.read_csv( get_data_path('pwmantel_exp_results_reordered_distance_matrices' '.txt'), sep='\t', index_col=(0, 1)) self.exp_results_dm_dm2 = pd.read_csv( get_data_path('pwmantel_exp_results_dm_dm2.txt'), sep='\t', index_col=(0, 1)) self.exp_results_all_dms = pd.read_csv( get_data_path('pwmantel_exp_results_all_dms.txt'), sep='\t', index_col=(0, 1)) def test_minimal_compatible_input(self): # Matrices are already in the correct order and have matching IDs. 
np.random.seed(0) # input as DistanceMatrix instances obs = pwmantel(self.min_dms, alternative='greater') assert_frame_equal(obs, self.exp_results_minimal) np.random.seed(0) # input as array_like obs = pwmantel((self.minx, self.miny, self.minz), alternative='greater') assert_frame_equal(obs, self.exp_results_minimal) def test_minimal_compatible_input_with_labels(self): np.random.seed(0) obs = pwmantel(self.min_dms, alternative='greater', labels=('minx', 'miny', 'minz')) assert_frame_equal(obs, self.exp_results_minimal_with_labels) def test_duplicate_dms(self): obs = pwmantel((self.minx_dm, self.minx_dm, self.minx_dm), alternative='less') assert_frame_equal(obs, self.exp_results_duplicate_dms) def test_na_p_value(self): obs = pwmantel((self.miny_dm, self.minx_dm), method='spearman', permutations=0) assert_frame_equal(obs, self.exp_results_na_p_value) def test_reordered_distance_matrices(self): # Matrices have matching IDs but they all have different ordering. x = self.minx_dm.filter(['1', '0', '2']) y = self.miny_dm.filter(['0', '2', '1']) z = self.minz_dm.filter(['1', '2', '0']) np.random.seed(0) obs = pwmantel((x, y, z), alternative='greater') assert_frame_equal(obs, self.exp_results_reordered_distance_matrices) def test_strict(self): # Matrices have some matching and nonmatching IDs, with different # ordering. x = self.minx_dm_extra.filter(['1', '0', 'foo', '2']) y = self.miny_dm.filter(['0', '2', '1']) z = self.minz_dm_extra.filter(['bar', '1', '2', '0']) np.random.seed(0) # strict=False should discard IDs that aren't found in both matrices obs = pwmantel((x, y, z), alternative='greater', strict=False) assert_frame_equal(obs, self.exp_results_reordered_distance_matrices) def test_id_lookup(self): # Matrices have mismatched IDs but a lookup is provided. 
self.minx_dm_extra.ids = ['a', 'b', 'c', 'foo'] self.minz_dm_extra.ids = ['d', 'e', 'f', 'bar'] lookup = {'a': '0', 'b': '1', 'c': '2', 'foo': 'foo', 'd': '0', 'e': '1', 'f': '2', 'bar': 'bar', '0': '0', '1': '1', '2': '2'} x = self.minx_dm_extra.filter(['b', 'a', 'foo', 'c']) y = self.miny_dm.filter(['0', '2', '1']) z = self.minz_dm_extra.filter(['bar', 'e', 'f', 'd']) x_copy = x.copy() y_copy = y.copy() z_copy = z.copy() np.random.seed(0) obs = pwmantel((x, y, z), alternative='greater', strict=False, lookup=lookup) assert_frame_equal(obs, self.exp_results_reordered_distance_matrices) # Make sure the inputs aren't modified. self.assertEqual(x, x_copy) self.assertEqual(y, y_copy) self.assertEqual(z, z_copy) def test_too_few_dms(self): with self.assertRaises(ValueError): pwmantel([self.miny_dm]) def test_wrong_number_of_labels(self): with self.assertRaises(ValueError): pwmantel(self.min_dms, labels=['foo', 'bar']) def test_duplicate_labels(self): with self.assertRaises(ValueError): pwmantel(self.min_dms, labels=['foo', 'bar', 'foo']) def test_mixed_input_types(self): # DistanceMatrix, DistanceMatrix, array_like with self.assertRaises(TypeError): pwmantel((self.miny_dm, self.minx_dm, self.minz)) def test_filepaths_as_input(self): dms = [ get_data_path('dm.txt'), get_data_path('dm2.txt'), ] np.random.seed(0) obs = pwmantel(dms) assert_frame_equal(obs, self.exp_results_dm_dm2) def test_many_filepaths_as_input(self): dms = [ get_data_path('dm2.txt'), get_data_path('dm.txt'), get_data_path('dm4.txt'), get_data_path('dm3.txt') ] np.random.seed(0) obs = pwmantel(dms) assert_frame_equal(obs, self.exp_results_all_dms) class OrderDistanceMatricesTests(MantelTestData): def setUp(self): super(OrderDistanceMatricesTests, self).setUp() def test_array_like_input(self): obs = _order_dms(self.minx, self.miny) self.assertEqual(obs, (self.minx_dm, self.miny_dm)) def test_reordered_distance_matrices(self): # All matching IDs but with different orderings. 
x = self.minx_dm.filter(['1', '0', '2']) y = self.miny_dm.filter(['0', '2', '1']) exp = (x, y.filter(['1', '0', '2'])) obs = _order_dms(x, y) self.assertEqual(obs, exp) def test_reordered_and_nonmatching_distance_matrices(self): # Some matching and nonmatching IDs, with different ordering. x = self.minx_dm_extra.filter(['1', '0', 'foo', '2']) z = self.minz_dm_extra.filter(['bar', '0', '2', '1']) exp = (x.filter(['1', '0', '2']), z.filter(['1', '0', '2'])) obs = _order_dms(x, z, strict=False) self.assertEqual(obs, exp) def test_id_lookup(self): # Matrices have mismatched IDs but a lookup is provided. self.minx_dm_extra.ids = ['a', 'b', 'c', 'foo'] self.minz_dm_extra.ids = ['d', 'e', 'f', 'bar'] lookup = {'a': '0', 'b': '1', 'c': '2', 'foo': 'foo', 'd': '0', 'e': '1', 'f': '2', 'bar': 'bar'} x = self.minx_dm_extra.filter(['b', 'a', 'foo', 'c']) z = self.minz_dm_extra.filter(['bar', 'e', 'f', 'd']) x_copy = x.copy() z_copy = z.copy() exp = (self.minx_dm.filter(['1', '0', '2']), self.minz_dm.filter(['1', '0', '2'])) obs = _order_dms(x, z, strict=False, lookup=lookup) self.assertEqual(obs, exp) # Make sure the inputs aren't modified. self.assertEqual(x, x_copy) self.assertEqual(z, z_copy) def test_lookup_with_array_like(self): lookup = {'0': 'a', '1': 'b', '2': 'c'} with self.assertRaises(ValueError): _order_dms(self.minx, self.miny, lookup=lookup) def test_shape_mismatch(self): with self.assertRaises(ValueError): _order_dms(self.minx, [[0, 2], [2, 0]]) def test_missing_ids_in_lookup(self): # Mapping for '1' is missing. Should get an error while remapping IDs # for the first distance matrix. lookup = {'0': 'a', '2': 'c'} with self.assertRaisesRegexp(KeyError, "first.*(x).*'1'\"$"): _order_dms(self.minx_dm, self.miny_dm, lookup=lookup) # Mapping for 'bar' is missing. Should get an error while remapping IDs # for the second distance matrix. 
lookup = {'0': 'a', '1': 'b', '2': 'c', 'foo': 'a', 'baz': 'c'} self.miny_dm.ids = ('foo', 'bar', 'baz') with self.assertRaisesRegexp(KeyError, "second.*(y).*'bar'\"$"): _order_dms(self.minx_dm, self.miny_dm, lookup=lookup) def test_nonmatching_ids_strict_true(self): with self.assertRaises(ValueError): _order_dms(self.minx_dm, self.minz_dm_extra, strict=True) def test_no_matching_ids(self): self.minx_dm.ids = ['foo', 'bar', 'baz'] self.miny_dm.ids = ['a', 'b', 'c'] with self.assertRaises(ValueError): _order_dms(self.minx_dm, self.miny_dm, strict=False) def test_mixed_input_types(self): with self.assertRaises(TypeError): _order_dms(self.minx, self.minz_dm) with self.assertRaises(TypeError): _order_dms(self.minz_dm, self.minx) if __name__ == '__main__': main()
{ "content_hash": "6fc7cff567bbee16f5342b10c060c48d", "timestamp": "", "source": "github", "line_count": 561, "max_line_length": 79, "avg_line_length": 38.51693404634581, "alnum_prop": 0.5592373195112921, "repo_name": "jensreeder/scikit-bio", "id": "cff8f4cf024114d8ad118a84c939d95484f1dcc5", "size": "21962", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "skbio/stats/distance/tests/test_mantel.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "39087" }, { "name": "CSS", "bytes": "4379" }, { "name": "Groff", "bytes": "259" }, { "name": "Makefile", "bytes": "585" }, { "name": "Python", "bytes": "1736363" } ], "symlink_target": "" }
from django.core.management import BaseCommand from django.conf import settings from selenium import webdriver from pyquery.pyquery import PyQuery as pq import os, datetime, time import requests, urllib, redis from excel.models import CrawlExcel from account.models import PhoneUserProfile from django.contrib.auth.models import User url = 'http://zyd.zhaogang.com/ziyuan.html' download_url = 'http://zyddownload.zhaogang.com/Ajax/DownLoad/ZydDownLoad.ashx?callback=?&PKID=%s' class Command(BaseCommand): def handle(self, *args, **kwargs): print '开始下载找钢网资源单...' driver = webdriver.PhantomJS() if not os.path.exists(settings.CRAWL_ROOT): os.mkdir(settings.CRAWL_ROOT) print '新建目录: %s' % settings.CRAWL_ROOT today = datetime.datetime.now().strftime('%Y_%m_%d') today_dir = os.path.join(settings.CRAWL_ROOT, today) if not os.path.exists(today_dir): os.mkdir(today_dir) print '新建目录: %s' % today_dir zhaogang_dir = os.path.join(today_dir, 'zhaogang') if not os.path.exists(zhaogang_dir): os.mkdir(zhaogang_dir) print '新建目录: %s' % zhaogang_dir try: profile = PhoneUserProfile.objects.get(nickname=u'找钢网资源单', status=2) except PhoneUserProfile.DoesNotExist: user = User.objects.create_user('__zhaogang', '__zhaogang') profile = PhoneUserProfile.objects.create( user=user, phone='-', qq='-', nickname=u'找钢网资源单', status=2 ) print '系统用户已生成' driver.get(url) time.sleep(2) q = pq(driver.page_source) pages = int(q('.total').text()[1:-1]) print '一共%d页' % pages for page in range(1, pages+1): driver.get(url+'?p=%d'%page) print '第%d页' % page time.sleep(2) q = pq(driver.page_source) q = q('table tr') for _ in q[1:]: excel_id = pq(pq(_).find('a')[-2]).attr('vals') excel_provider = pq(pq(_).find('.company')[0]).text() if CrawlExcel.objects.filter(source=1, source_id=excel_id).exists(): continue try: time.sleep(2) r = requests.get(download_url % excel_id, stream=True) file_name = urllib.unquote(r.headers['content-disposition'].split('=')[1]) print '下载中: [%s] %s' % (excel_id, file_name) file_path = 
os.path.join(zhaogang_dir, file_name) f = open(file_path, 'wb') for block in r.iter_content(1024): if not block: break f.write(block) f.close() CrawlExcel.objects.create( create_time=time.time(), crawl_user=profile.user, source=1, source_id=excel_id, provider=excel_provider, filepath=file_path, imported=False ) except Exception as e: print '错误: [%s]' % excel_id import traceback traceback.print_exc() driver.close()
{ "content_hash": "ade557a59f076dab5eb43b1945c81c69", "timestamp": "", "source": "github", "line_count": 105, "max_line_length": 98, "avg_line_length": 32.93333333333333, "alnum_prop": 0.507807981492192, "repo_name": "stone5495/zebra", "id": "2295e068e7565875981e893066c877a07440dab9", "size": "3575", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web/excel/management/commands/crawl_zhaogang.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "30" }, { "name": "HTML", "bytes": "16980" }, { "name": "Python", "bytes": "87002" } ], "symlink_target": "" }
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Script to create Chrome Installer archive. This script is used to create an archive of all the files required for a Chrome install in appropriate directory structure. It reads chrome.release file as input, creates chrome.7z archive, compresses setup.exe and generates packed_files.txt for mini_installer project. """ import ConfigParser import glob import optparse import os import shutil import subprocess import sys ARCHIVE_DIR = "installer_archive" # suffix to uncompresed full archive file, appended to options.output_name ARCHIVE_SUFFIX = ".7z" BSDIFF_EXEC = "bsdiff.exe" CHROME_DIR = "Chrome-bin" CHROME_PATCH_FILE_SUFFIX = "_patch" # prefixed by options.output_name # compressed full archive suffix, will be prefixed by options.output_name COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z" COMPRESSED_FILE_EXT = ".packed.7z" # extension of patch archive file COURGETTE_EXEC = "courgette.exe" MINI_INSTALLER_INPUT_FILE = "packed_files.txt" PATCH_FILE_EXT = '.diff' SETUP_EXEC = "setup.exe" SETUP_PATCH_FILE_PREFIX = "setup_patch" TEMP_ARCHIVE_DIR = "temp_installer_archive" VERSION_FILE = "VERSION" def BuildVersion(build_dir): """Returns the full build version string constructed from information in VERSION_FILE. Any segment not found in that file will default to '0'. 
""" major = 0 minor = 0 build = 0 patch = 0 for line in open(os.path.join(build_dir, '../../chrome', VERSION_FILE), 'r'): line = line.rstrip() if line.startswith('MAJOR='): major = line[6:] elif line.startswith('MINOR='): minor = line[6:] elif line.startswith('BUILD='): build = line[6:] elif line.startswith('PATCH='): patch = line[6:] return '%s.%s.%s.%s' % (major, minor, build, patch) def CompressUsingLZMA(build_dir, compressed_file, input_file): lzma_exec = GetLZMAExec(build_dir) cmd = [lzma_exec, 'a', '-t7z', # Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe # pre-filter). This results in a ~2.3MB decrease in installer size on # a 24MB installer. # Additionally, these settings reflect a 7zip 4.42 and up change in # the definition of -mx9, increasting the dicionary size moving to # 26bit = 64MB. This results in an additional ~3.5MB decrease. # Older 7zip versions can support these settings, as these changes # rely on existing functionality in the lzma format. '-m0=BCJ2', '-m1=LZMA:d27:fb128', '-m2=LZMA:d22:fb128:mf=bt2', '-m3=LZMA:d22:fb128:mf=bt2', '-mb0:1', '-mb0s1:2', '-mb0s2:3', compressed_file, input_file,] if os.path.exists(compressed_file): os.remove(compressed_file) RunSystemCommand(cmd) def CopyAllFilesToStagingDir(config, distribution, staging_dir, build_dir, enable_hidpi, enable_touch_ui): """Copies the files required for installer archive. Copies all common files required for various distributions of Chromium and also files for the specific Chromium build specified by distribution. 
""" CopySectionFilesToStagingDir(config, 'GENERAL', staging_dir, build_dir) if distribution: if len(distribution) > 1 and distribution[0] == '_': distribution = distribution[1:] CopySectionFilesToStagingDir(config, distribution.upper(), staging_dir, build_dir) if enable_hidpi == '1': CopySectionFilesToStagingDir(config, 'HIDPI', staging_dir, build_dir) if enable_touch_ui == '1': CopySectionFilesToStagingDir(config, 'TOUCH', staging_dir, build_dir) def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir): """Copies installer archive files specified in section from src_dir to staging_dir. This method reads section from config and copies all the files specified from src_dir to staging dir. """ for option in config.options(section): if option.endswith('dir'): continue dst_dir = os.path.join(staging_dir, config.get(section, option)) src_paths = glob.glob(os.path.join(src_dir, option)) if src_paths and not os.path.exists(dst_dir): os.makedirs(dst_dir) for src_path in src_paths: dst_path = os.path.join(dst_dir, os.path.basename(src_path)) if not os.path.exists(dst_path): shutil.copy(src_path, dst_dir) def GenerateDiffPatch(options, orig_file, new_file, patch_file): if (options.diff_algorithm == "COURGETTE"): exe_file = os.path.join(options.last_chrome_installer, COURGETTE_EXEC) cmd = '%s -gen "%s" "%s" "%s"' % (exe_file, orig_file, new_file, patch_file) else: exe_file = os.path.join(options.build_dir, BSDIFF_EXEC) cmd = [exe_file, orig_file, new_file, patch_file,] RunSystemCommand(cmd) def GetLZMAExec(build_dir): lzma_exec = os.path.join(build_dir, "..", "..", "third_party", "lzma_sdk", "Executable", "7za.exe") return lzma_exec def GetPrevVersion(build_dir, temp_dir, last_chrome_installer, output_name): if not last_chrome_installer: return '' lzma_exec = GetLZMAExec(build_dir) prev_archive_file = os.path.join(last_chrome_installer, output_name + ARCHIVE_SUFFIX) cmd = [lzma_exec, 'x', '-o"%s"' % temp_dir, prev_archive_file, 'Chrome-bin/*/chrome.dll',] 
RunSystemCommand(cmd) dll_path = glob.glob(os.path.join(temp_dir, 'Chrome-bin', '*', 'chrome.dll')) return os.path.split(os.path.split(dll_path[0])[0])[1] def MakeStagingDirectories(staging_dir): """Creates a staging path for installer archive. If directory exists already, deletes the existing directory. """ file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR) if os.path.exists(file_path): shutil.rmtree(file_path) os.makedirs(file_path) temp_file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR) if os.path.exists(temp_file_path): shutil.rmtree(temp_file_path) os.makedirs(temp_file_path) return (file_path, temp_file_path) def Readconfig(input_file, current_version): """Reads config information from input file after setting default value of global variabes. """ variables = {} variables['ChromeDir'] = CHROME_DIR variables['VersionDir'] = os.path.join(variables['ChromeDir'], current_version) config = ConfigParser.SafeConfigParser(variables) config.read(input_file) return config def RunSystemCommand(cmd, **kw): print 'Running', cmd exit_code = subprocess.call(cmd, **kw) if (exit_code != 0): raise Exception("Error while running cmd: %s, exit_code: %s" % (cmd, exit_code)) def CreateArchiveFile(options, staging_dir, current_version, prev_version): """Creates a new installer archive file after deleting any existing old file. """ # First create an uncompressed archive file for the current build (chrome.7z) lzma_exec = GetLZMAExec(options.build_dir) archive_file = os.path.join(options.output_dir, options.output_name + ARCHIVE_SUFFIX) cmd = [lzma_exec, 'a', '-t7z', archive_file, os.path.join(staging_dir, CHROME_DIR), '-mx0',] # There doesnt seem to be any way in 7za.exe to override existing file so # we always delete before creating a new one. 
if not os.path.exists(archive_file): RunSystemCommand(cmd) elif options.skip_rebuild_archive != "true": os.remove(archive_file) RunSystemCommand(cmd) # If we are generating a patch, run bsdiff against previous build and # compress the resulting patch file. If this is not a patch just compress the # uncompressed archive file. patch_name_prefix = options.output_name + CHROME_PATCH_FILE_SUFFIX if options.last_chrome_installer: prev_archive_file = os.path.join(options.last_chrome_installer, options.output_name + ARCHIVE_SUFFIX) patch_file = os.path.join(options.build_dir, patch_name_prefix + PATCH_FILE_EXT) GenerateDiffPatch(options, prev_archive_file, archive_file, patch_file) compressed_archive_file = patch_name_prefix + '_' + \ current_version + '_from_' + prev_version + \ COMPRESSED_FILE_EXT orig_file = patch_file else: compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX orig_file = archive_file compressed_archive_file_path = os.path.join(options.output_dir, compressed_archive_file) CompressUsingLZMA(options.build_dir, compressed_archive_file_path, orig_file) return compressed_archive_file def PrepareSetupExec(options, current_version, prev_version): """Prepares setup.exe for bundling in mini_installer based on options.""" if options.setup_exe_format == "FULL": setup_file = SETUP_EXEC elif options.setup_exe_format == "DIFF": if not options.last_chrome_installer: raise Exception( "To use DIFF for setup.exe, --last_chrome_installer is needed.") prev_setup_file = os.path.join(options.last_chrome_installer, SETUP_EXEC) new_setup_file = os.path.join(options.build_dir, SETUP_EXEC) patch_file = os.path.join(options.build_dir, SETUP_PATCH_FILE_PREFIX + PATCH_FILE_EXT) GenerateDiffPatch(options, prev_setup_file, new_setup_file, patch_file) setup_file = SETUP_PATCH_FILE_PREFIX + '_' + current_version + \ '_from_' + prev_version + COMPRESSED_FILE_EXT setup_file_path = os.path.join(options.build_dir, setup_file) CompressUsingLZMA(options.build_dir, 
setup_file_path, patch_file) else: cmd = ['makecab.exe', '/D', 'CompressionType=LZX', '/V1', '/L', options.output_dir, os.path.join(options.build_dir, SETUP_EXEC),] # Send useless makecab progress on stdout to the bitbucket. RunSystemCommand(cmd, stdout=open(os.devnull, "w")) setup_file = SETUP_EXEC[:-1] + "_" return setup_file _RESOURCE_FILE_TEMPLATE = """\ // This file is automatically generated by create_installer_archive.py. // It contains the resource entries that are going to be linked inside // mini_installer.exe. For each file to be linked there should be two // lines: // - The first line contains the output filename (without path) and the // type of the resource ('BN' - not compressed , 'BL' - LZ compressed, // 'B7' - LZMA compressed) // - The second line contains the path to the input file. Uses '/' to // separate path components. %(setup_file)s %(setup_file_resource_type)s "%(setup_file_path)s" %(archive_file)s B7 "%(archive_file_path)s" """ def CreateResourceInputFile( output_dir, setup_format, archive_file, setup_file, resource_file_path): """Creates resource input file (packed_files.txt) for mini_installer project. This method checks the format of setup.exe being used and according sets its resource type. """ setup_resource_type = "BL" if (setup_format == "FULL"): setup_resource_type = "BN" elif (setup_format == "DIFF"): setup_resource_type = "B7" # Expand the resource file template. args = { 'setup_file': setup_file, 'setup_file_resource_type': setup_resource_type, 'setup_file_path': os.path.join(output_dir, setup_file).replace("\\","/"), 'archive_file': archive_file, 'archive_file_path': os.path.join(output_dir, archive_file).replace("\\","/"), } resource_file = _RESOURCE_FILE_TEMPLATE % args with open(resource_file_path, 'w') as f: f.write(resource_file) # Reads |manifest_name| from |build_dir| and writes |manifest_name| to # |output_dir| with the same content plus |inserted_string| added just before # |insert_before|. 
def CopyAndAugmentManifest(build_dir, output_dir, manifest_name,
                           inserted_string, insert_before):
  """Copies |manifest_name| from |build_dir| to |output_dir|, inserting
  |inserted_string| immediately before the first occurrence of
  |insert_before|.

  Raises:
    ValueError: if |insert_before| does not occur anywhere in the manifest.
  """
  with open(os.path.join(build_dir, manifest_name), 'r') as manifest_file:
    manifest_lines = manifest_file.readlines()

  # Find the first line containing |insert_before| and remember the offset
  # of the match within that line.
  insert_line = -1
  insert_pos = -1
  for line_index, line in enumerate(manifest_lines):
    insert_pos = line.find(insert_before)
    if insert_pos != -1:
      insert_line = line_index
      break
  if insert_line == -1:
    raise ValueError('Could not find {0} in the manifest:\n{1}'.format(
        insert_before, ''.join(manifest_lines)))

  old = manifest_lines[insert_line]
  manifest_lines[insert_line] = (old[:insert_pos] + inserted_string +
                                 old[insert_pos:])

  # "with" guarantees the handles are closed even on error; the previous
  # implementation leaked both file objects if an exception was raised.
  with open(os.path.join(output_dir, manifest_name), 'w') as out_file:
    out_file.write(''.join(manifest_lines))


# Copy the relevant CRT DLLs to |build_dir|. We copy DLLs from all versions
# of VS installed to make sure we have the correct CRT version, unused DLLs
# should not conflict with the others anyways.
def CopyVisualStudioRuntimeDLLs(build_dir):
  """Best-effort copy of the VS (or winsdk) CRT DLLs into |build_dir|.

  Only warns (does not fail) when the configuration cannot be determined
  from the directory name or when no CRT DLLs can be located.
  """
  is_debug = os.path.basename(build_dir) == 'Debug'
  if not is_debug and os.path.basename(build_dir) != 'Release':
    print ("Warning: could not determine build configuration from "
           "output directory, assuming Release build.")

  # First try the redist directories of any installed Visual Studio.
  crt_dlls = []
  if is_debug:
    crt_dlls = glob.glob(
        "C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/"
        "Debug_NonRedist/x86/Microsoft.*.DebugCRT/*.dll")
  else:
    crt_dlls = glob.glob(
        "C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/x86/"
        "Microsoft.*.CRT/*.dll")

  # Also handle the case where someone is building using only winsdk and
  # doesn't have Visual Studio installed.
  if not crt_dlls:
    # On a 64-bit system, 32-bit dlls are in SysWOW64 (don't ask).
    if os.access("C:/Windows/SysWOW64", os.F_OK):
      sys_dll_dir = "C:/Windows/SysWOW64"
    else:
      sys_dll_dir = "C:/Windows/System32"
    if is_debug:
      crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0d.dll"))
    else:
      crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0.dll"))

  if not crt_dlls:
    print ("Warning: could not find CRT DLLs to copy to build dir - target "
           "may not run on a system that doesn't have those DLLs.")

  for dll in crt_dlls:
    shutil.copy(dll, build_dir)


# Copies component build DLLs and generates required config files and manifests
# in order for chrome.exe and setup.exe to be able to find those DLLs at
# run-time.
# This is meant for developer builds only and should never be used to package
# an official build.
def DoComponentBuildTasks(staging_dir, build_dir, current_version):
  # Get the required directories for the upcoming operations.
  chrome_dir = os.path.join(staging_dir, CHROME_DIR)
  version_dir = os.path.join(chrome_dir, current_version)
  installer_dir = os.path.join(version_dir, 'Installer')
  # |installer_dir| is technically only created post-install, but we need it
  # now to add setup.exe's config and manifest to the archive.
  if not os.path.exists(installer_dir):
    os.mkdir(installer_dir)

  # Copy the VS CRT DLLs to |build_dir|. This must be done before the general
  # copy step below to ensure the CRT DLLs are added to the archive and marked
  # as a dependency in the exe manifests generated below.
  CopyVisualStudioRuntimeDLLs(build_dir)

  # Copy all the DLLs in |build_dir| to the version directory. Simultaneously
  # build a list of their names to mark them as dependencies of chrome.exe and
  # setup.exe later.
  # Copy every DLL from the build output next to chrome.dll and remember
  # their base names (without extension) for the manifest dependencies below.
  dlls = glob.glob(os.path.join(build_dir, '*.dll'))
  dll_names = []
  for dll in dlls:
    shutil.copy(dll, version_dir)
    dll_names.append(os.path.splitext(os.path.basename(dll))[0])

  # .exe.config template: tells the loader to also probe |rel_path| when
  # resolving assemblies.
  exe_config = (
      "<configuration>\n"
      "  <windows>\n"
      "    <assemblyBinding xmlns='urn:schemas-microsoft-com:asm.v1'>\n"
      "        <probing privatePath='{rel_path}'/>\n"
      "    </assemblyBinding>\n"
      "  </windows>\n"
      "</configuration>")

  # Write chrome.exe.config to point to the version directory.
  chrome_exe_config_file = open(
      os.path.join(chrome_dir, 'chrome.exe.config'), 'w')
  chrome_exe_config_file.write(exe_config.format(rel_path=current_version))
  chrome_exe_config_file.close()

  # Write setup.exe.config to point to the version directory (which is one
  # level up from setup.exe post-install).
  setup_exe_config_file = open(
      os.path.join(installer_dir, 'setup.exe.config'), 'w')
  setup_exe_config_file.write(exe_config.format(rel_path='..'))
  setup_exe_config_file.close()

  # Add a dependency for each DLL in |dlls| to the existing manifests for
  # chrome.exe and setup.exe. Some of these DLLs are not actually used by
  # either process, but listing them all as dependencies doesn't hurt as it
  # only makes them visible to the exes, just like they already are in the
  # build output directory.
  exe_manifest_dependencies_list = []
  for name in dll_names:
    exe_manifest_dependencies_list.append(
        "<dependency>"
        "<dependentAssembly>"
        "<assemblyIdentity type='win32' name='chrome.{dll_name}' "
        "version='0.0.0.0' processorArchitecture='x86' language='*'/>"
        "</dependentAssembly>"
        "</dependency>".format(dll_name=name))
  exe_manifest_dependencies = ''.join(exe_manifest_dependencies_list)

  # Write a modified chrome.exe.manifest beside chrome.exe.
  CopyAndAugmentManifest(build_dir, chrome_dir, 'chrome.exe.manifest',
                         exe_manifest_dependencies, '</assembly>')
  # Write a modified setup.exe.manifest beside setup.exe in
  # |version_dir|/Installer.
  CopyAndAugmentManifest(build_dir, installer_dir, 'setup.exe.manifest',
                         exe_manifest_dependencies, '</assembly>')

  # Generate assembly manifests for each DLL in |dlls|. These do not interfere
  # with the private manifests potentially embedded in each DLL. They simply
  # allow chrome.exe and setup.exe to see those DLLs although they are in a
  # separate directory post-install.
  for name in dll_names:
    dll_manifest = (
        "<assembly\n"
        "    xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>\n"
        "  <assemblyIdentity name='chrome.{dll_name}' version='0.0.0.0'\n"
        "      type='win32' processorArchitecture='x86'/>\n"
        "  <file name='{dll_name}.dll'/>\n"
        "</assembly>".format(dll_name=name))
    dll_manifest_file = open(os.path.join(
        version_dir, "chrome.{dll_name}.manifest".format(dll_name=name)), 'w')
    dll_manifest_file.write(dll_manifest)
    dll_manifest_file.close()


def main(options):
  """Main method that reads input file, creates archive file and writes
  resource input file.
  """
  current_version = BuildVersion(options.build_dir)

  config = Readconfig(options.input_file, current_version)

  (staging_dir, temp_dir) = MakeStagingDirectories(options.staging_dir)

  prev_version = GetPrevVersion(options.build_dir, temp_dir,
                                options.last_chrome_installer,
                                options.output_name)

  # Preferentially copy the files we can find from the output_dir, as
  # this is where we'll find the Syzygy-optimized executables when
  # building the optimized mini_installer.
  if options.build_dir != options.output_dir:
    CopyAllFilesToStagingDir(config, options.distribution,
                             staging_dir, options.output_dir,
                             options.enable_hidpi, options.enable_touch_ui)

  # Now copy the remainder of the files from the build dir.
  CopyAllFilesToStagingDir(config, options.distribution,
                           staging_dir, options.build_dir,
                           options.enable_hidpi, options.enable_touch_ui)

  # Component-build flag arrives from the build system as the string '1'.
  if options.component_build == '1':
    DoComponentBuildTasks(staging_dir, options.build_dir, current_version)

  # Build numbers use only the BUILD.PATCH components (indices 2 and 3) of
  # the four-part MAJOR.MINOR.BUILD.PATCH version string.
  version_numbers = current_version.split('.')
  current_build_number = version_numbers[2] + '.' + version_numbers[3]
  prev_build_number = ''
  if prev_version:
    version_numbers = prev_version.split('.')
    prev_build_number = version_numbers[2] + '.' + version_numbers[3]

  # Name of the archive file built (for example - chrome.7z or
  # patch-<old_version>-<new_version>.7z or patch-<new_version>.7z
  archive_file = CreateArchiveFile(options, staging_dir,
                                   current_build_number, prev_build_number)

  setup_file = PrepareSetupExec(options,
                                current_build_number, prev_build_number)

  CreateResourceInputFile(options.output_dir, options.setup_exe_format,
                          archive_file, setup_file, options.resource_file_path)


def _ParseOptions():
  """Parses and validates the command line; fills in defaulted options."""
  parser = optparse.OptionParser()
  parser.add_option('-i', '--input_file',
      help='Input file describing which files to archive.')
  parser.add_option('-b', '--build_dir',
      help='Build directory. The paths in input_file are relative to this.')
  parser.add_option('--staging_dir',
      help='Staging directory where intermediate files and directories '
           'will be created')
  parser.add_option('-o', '--output_dir',
      help='The output directory where the archives will be written. '
            'Defaults to the build_dir.')
  parser.add_option('--resource_file_path',
      help='The path where the resource file will be output. '
           'Defaults to %s in the build directory.' %
               MINI_INSTALLER_INPUT_FILE)
  parser.add_option('-d', '--distribution',
      help='Name of Chromium Distribution. Optional.')
  parser.add_option('-s', '--skip_rebuild_archive', default="False",
      help='Skip re-building Chrome.7z archive if it exists.')
  parser.add_option('-l', '--last_chrome_installer',
      help='Generate differential installer. The value of this parameter '
           'specifies the directory that contains base versions of '
           'setup.exe, courgette.exe (if --diff_algorithm is COURGETTE) '
           '& chrome.7z.')
  parser.add_option('-f', '--setup_exe_format', default='COMPRESSED',
      help='How setup.exe should be included {COMPRESSED|DIFF|FULL}.')
  parser.add_option('-a', '--diff_algorithm', default='BSDIFF',
      help='Diff algorithm to use when generating differential patches '
           '{BSDIFF|COURGETTE}.')
  parser.add_option('-n', '--output_name', default='chrome',
      help='Name used to prefix names of generated archives.')
  parser.add_option('--enable_hidpi', default='0',
      help='Whether to include HiDPI resource files.')
  parser.add_option('--enable_touch_ui', default='0',
      help='Whether to include resource files from the "TOUCH" section of the '
           'input file.')
  parser.add_option('--component_build', default='0',
      help='Whether this archive is packaging a component build.')

  options, _ = parser.parse_args()
  if not options.build_dir:
    parser.error('You must provide a build dir.')

  options.build_dir = os.path.normpath(options.build_dir)

  if not options.staging_dir:
    parser.error('You must provide a staging dir.')

  if not options.input_file:
    parser.error('You must provide an input file')

  if not options.output_dir:
    options.output_dir = options.build_dir

  if not options.resource_file_path:
    options.resource_file_path = os.path.join(options.build_dir,
                                              MINI_INSTALLER_INPUT_FILE)

  return options


if '__main__' == __name__:
  # Echo the invocation to aid debugging calls from the build system.
  print sys.argv
  sys.exit(main(_ParseOptions()))
{ "content_hash": "8beb1221d8800db446d1d9b8d5f46480", "timestamp": "", "source": "github", "line_count": 593, "max_line_length": 80, "avg_line_length": 40.03878583473862, "alnum_prop": 0.664237880638504, "repo_name": "keishi/chromium", "id": "b01468a7212f467a4d6b2c22f3e428a0f85f10c8", "size": "23743", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chrome/tools/build/win/create_installer_archive.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "853" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "1172794" }, { "name": "C", "bytes": "67452317" }, { "name": "C#", "bytes": "1132" }, { "name": "C++", "bytes": "132681259" }, { "name": "F#", "bytes": "381" }, { "name": "Go", "bytes": "19048" }, { "name": "Java", "bytes": "361412" }, { "name": "JavaScript", "bytes": "16603687" }, { "name": "Objective-C", "bytes": "9609581" }, { "name": "PHP", "bytes": "97796" }, { "name": "Perl", "bytes": "918683" }, { "name": "Python", "bytes": "6407891" }, { "name": "R", "bytes": "524" }, { "name": "Shell", "bytes": "4192593" }, { "name": "Tcl", "bytes": "277077" } ], "symlink_target": "" }
from setuptools import setup, Extension
import subprocess
import sys

long_description = """
This Python package is a high-level wrapper for Kerberos (GSSAPI) operations.
The goal is to avoid having to build a module that wraps the entire
Kerberos.framework, and instead offer a limited set of functions that do what
is needed for client/server Kerberos authentication based on
<http://www.ietf.org/rfc/rfc4559.txt>.
"""


# Backport from Python 2.7 in case we're in 2.6.
def check_output(*popenargs, **kwargs):
    """Run a command and return its stdout, raising CalledProcessError
    (with the captured output attached) on a non-zero exit status."""
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise subprocess.CalledProcessError(retcode, cmd, output=output)
    return output


def check_krb5_version():
    """Return the extra compile flag '-DGSSAPI_EXT' when the installed
    Kerberos library is at least version 1.10, otherwise None.

    ``krb5-config --version`` prints e.g. "Kerberos 5 release 1.12.1"; the
    release number is the fourth whitespace-separated token.
    """
    krb5_vers = check_output(["krb5-config", "--version"],
                             universal_newlines=True).split()
    if len(krb5_vers) == 4:
        version_parts = krb5_vers[3].split('.')
        try:
            major = int(version_parts[0])
            minor = int(version_parts[1]) if len(version_parts) > 1 else 0
        except ValueError:
            # Unparseable version string: behave as if the extension API
            # is unavailable rather than crashing the build.
            return None
        # Compare as a (major, minor) tuple. The previous code compared only
        # the minor component (>= 10), which would misclassify any future
        # 2.x release and raised IndexError on versions without a dot.
        if (major, minor) >= (1, 10):
            return r'-DGSSAPI_EXT'
    return None


# Query krb5-config at build time for the linker/compiler flags needed to
# build against GSSAPI.
extra_link_args = check_output(
    ["krb5-config", "--libs", "gssapi"],
    universal_newlines=True
).split()

extra_compile_args = check_output(
    ["krb5-config", "--cflags", "gssapi"],
    universal_newlines=True
).split()

krb5_ver = check_krb5_version()
if krb5_ver:
    extra_compile_args.append(krb5_ver)

setup (
    name = "pykerberos",
    version = "1.1.6",
    description = "High-level interface to Kerberos",
    long_description=long_description,
    license="ASL 2.0",
    classifiers = [
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Systems Administration :: Authentication/Directory"
    ],
    ext_modules = [
        Extension(
            "kerberos",
            extra_link_args = extra_link_args,
            extra_compile_args = extra_compile_args,
            sources = [
                "src/kerberos.c",
                "src/kerberosbasic.c",
                "src/kerberosgss.c",
                "src/kerberospw.c",
                "src/base64.c"
            ],
        ),
    ],
)
{ "content_hash": "ae83486111b14d7e6f9afda641cf8009", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 91, "avg_line_length": 32.19444444444444, "alnum_prop": 0.6186367558239861, "repo_name": "dave-bouchillon/pykerberos", "id": "92072661ed59cad50cb127739eb02be416c8594e", "size": "2925", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "64572" }, { "name": "Makefile", "bytes": "456" }, { "name": "Python", "bytes": "24948" } ], "symlink_target": "" }
# Use RPython's real unrolling_iterable when the RPython toolchain is
# available; otherwise fall back to an identity function so the code also
# runs on plain CPython (where no loop unrolling takes place).
try:
    from rpython.rlib.unroll import unrolling_iterable
except ImportError:
    # Bare string marker — presumably tells the RPython translator that this
    # branch is interpreter-only; kept verbatim (TODO confirm convention).
    "NOT_RPYTHON"
    def unrolling_iterable(values):
        # Fallback: simply hand back the iterable unchanged.
        return values
{ "content_hash": "26b3a9268504fdbe395b8c2de6b7bd0b", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 54, "avg_line_length": 26, "alnum_prop": 0.717948717948718, "repo_name": "SOM-st/RPySOM", "id": "fe4c26ac6e1ce6bf8e189154bdce4a9760623a53", "size": "156", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/rlib/unroll.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "183215" }, { "name": "Shell", "bytes": "223" } ], "symlink_target": "" }
import os
from pathlib import Path
from typing import List

import numpy as np
import pandas as pd
import csv

# The package self-import is only needed by get_eccpy_module_path() further
# down in this module to locate the installed package on disk. Guard it so
# the module can also be imported standalone (behavior is unchanged whenever
# the eccpy package is installed).
try:
    import eccpy
except ImportError:
    eccpy = None


def aaa(df_or_series, csv_out=r"D:\data\000_aaa_temp_df_out.csv"):
    """ Function for use in debugging.
    Saves pandas Series or Dataframes to a user-defined csv file.

    Parameters
    ----------
    df_or_series : pd.Series or pd.DataFrame
        Data to dump.
    csv_out : str
        Output csv path. Defaults to the historical hard-coded scratch path
        (kept for backward compatibility); pass an explicit path on machines
        without a D:\\data directory.
    """
    # convert any series to dataframe
    if isinstance(df_or_series, pd.Series):
        df_or_series = df_or_series.to_frame()
    df_or_series.to_csv(csv_out, sep=",", quoting=csv.QUOTE_NONNUMERIC)

def hill_eq(hill_constants, x):
    """ Four parameter sigmoidal Hill equation.

    y = upper + (lower-upper)/(1+(x/EC50)**-hillslope)

    Parameters
    ----------
    hill_constants : tuple
        Tuple of the four parameters : upper, lower, EC50 and hillslope
    x : float
        x-value for use in the equation

    Constants
    ---------
    upper : float
        Will approach the minimum response in the dose-response S-curve, and
        the maximum response in an inverse LD50 curve. Not currently used to
        judge whether the curve is sigmoidal.
    lower : float
        Will approach the maximum response in the dose-response S-curve, and
        zero in an inverse LD50 curve. Not currently used to judge whether
        the curve is sigmoidal.
    EC50 : float
        Fitted parameter; does not always accurately reflect the EC50 of the
        data and is NOT used as the calculated EC50 value (root finding with
        the brent equation is used for that instead).
    hillslope : float
        A high (possibly strongly negative) hillslope indicates a steep,
        strongly sigmoidal curve. Values approaching zero (-1 > hillslope > +1)
        generally indicate an exponential rather than sigmoidal curve, so the
        hillslope is used by the "judge_fit" program (with thresholds from the
        settings excel file) to decide whether a curve is sigmoidal.

    Notes
    -----
    Several equivalent variants of the Hill/sigmoid formula exist (GraphPad,
    Wikipedia EC50, cgptoolbox, generic logistic forms); this implementation
    follows the Wikipedia EC50 variant.
    """
    upper, lower, EC50, hillslope = hill_constants
    y = upper + (lower-upper)/(1+(x/EC50)**-hillslope)
    return y

def residuals(constants, function, x, y):
    """ Function used to optimise the fit of the curve to the data.

    It calculates the distance between y-value from real data and y-value
    from the function (sigmoid/sine/etc).
    """
    return y - function(constants, x)

def hill_eq_brentq(xvalues_for_curve, hill_constants, y_value_curve_center):
    """ Residual function for the four parameter sigmoidal Hill equation,
    used for root finding with scipy's brentq.

    y = hill_eq(x) - y_value_curve_center

    Parameters
    ----------
    xvalues_for_curve : array
        Numpy array of >250 x-values between the lowest and highest dose.
    hill_constants : tuple
        Tuple of the four parameters : upper, lower, EC50 and hillslope
    y_value_curve_center : array
        Represents the y-value of the curve center; the root of the returned
        expression is the x-value (EC50) at which the curve crosses it.

    Returns
    -------
    y - y_value_curve_center : array
        Array that approaches zero, when an optimum fit is found.
    """
    upper, lower, EC50, hillslope = hill_constants
    y = upper + (lower-upper)/(1+(xvalues_for_curve/EC50)**-hillslope)
    return y - y_value_curve_center

def normalise_0_1(arraylike):
    """ Normalise an array to values between 0 and 1.

    The linear formula norm = (orig - min)/(max - min) is used so that the
    data can be "denormalised" later (see denormalise_0_1), as long as the
    original min and max are known.

    Parameters
    ----------
    arraylike : array
        Numpy array (or other arraylike) dataset of floats or ints.

    Returns
    -------
    normalised : array
        Array of floats, containing the normalised datapoints.
    array_min : float
        Minimum value of the original data (needed for denormalisation).
    array_max : float
        Maximum value of the original data (needed for denormalisation).

    Usage
    -----
    normalised_array, min_, max_ = normalise_0_1(original_array)
    # or, if denormalisation is not necessary
    normalised_array = normalise_0_1(original_array)[0]
    """
    array_min = np.min(arraylike)
    array_max = np.max(arraylike)
    # NOTE(review): a constant input (max == min) divides by zero and yields
    # NaN/inf, matching the original behavior — confirm callers exclude it.
    normalised = (arraylike - array_min)/(array_max - array_min)
    # convert to float
    normalised = np.array(normalised).astype(float)
    return normalised, array_min, array_max

def denormalise_0_1(value_or_array, array_min, array_max):
    """ Denormalise a value or array back to the original scale.

    For use after normalisation between 0 and 1 with the normalise_0_1
    function, whose formula is norm = (orig - min)/(max - min).

    The denormalisation formula:
        denormalised = norm_value*(array_max - array_min) + array_min

    Parameters
    ----------
    value_or_array : int, float or arraylike
        Value(s) to denormalise. Lists are rejected; convert to numpy array.
    array_min : float
        Minimum value of the original data.
    array_max : float
        Maximum value of the original data.

    Returns
    -------
    denormalised : float, numpy array or pandas Series
        Denormalised value(s), same shape as the input.
    """
    if isinstance(value_or_array, list):
        raise ValueError('this function accepts arraylike data, not a list. '
                         'Please check data or convert list to numpy array')
    if not isinstance(value_or_array, (float, np.ndarray, pd.Series)):
        # Warn, then still apply the linear transform (which is valid for any
        # numeric scalar or arraylike). The previous implementation printed
        # this warning and then crashed with UnboundLocalError — e.g. for
        # plain ints, which its docstring explicitly advertises as supported.
        print("Unknown datatype. denormalise_0_1 has been given an input that does not appear to be "
              "an int, float, np.ndarray or pandas Series\n"
              "Attempting to process as if it is arraylike.....")
    denormalised = value_or_array*(array_max - array_min) + array_min
    return denormalised

def normalise_between_2_values(arraylike, min_value, max_value, invert=False):
    """Normalises an array of data between two desired values.

    Any values below min_value will be converted to 0.
    Any values above max_value will be converted to 1.
    Optionally, the normalised array can be inverted, so that the original
    highest values are 0, and the original lowest values are now 1.

    Parameters
    ----------
    arraylike : np.ndarray
        Arraylike original data (numpy array or pandas Series)
    min_value : float
        Desired minimum value for normalisation
    max_value : float
        Desired max value for normalisation
    invert : bool
        If True, normalised data will be inverted (former highest value = 0)

    Returns
    -------
    normalised : np.ndarray
        Normalised array of data

    Usage
    -----
    from eccpy.tools import normalise_between_2_values
    # for array
    orig_array = np.array(range(0, 15))
    norm_array = normalise_between_2_values(orig_array, 3, 10)
    # for pandas Dataframe
    df["norm_data"] = normalise_between_2_values(df["orig_data"], 3, 10)
    """
    # normalise array between min and max values
    normalised = (arraylike - min_value)/(max_value - min_value)
    # replace anything above 1 with 1
    normalised[normalised > 1] = 1
    # replace anything below 0 with 0
    normalised[normalised < 0] = 0
    # if desired, invert the normalised values
    if invert:
        normalised = abs(normalised - 1)
    return normalised

def setup_t20_colour_list():
    """ Setup a list of colours for the figures.

    Returns
    -------
    t20 : list
        List of RGB colour tuples, normalised between 0 and 1 as used by python colours.
    """
    """ Setup colours for the figures
    return : t20, a list of colour tuples
    """
    colour_lists_dict = {}
    #define colour lists
    colour_lists = {
        'tableau20' : [
            (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
            (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
            (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
            (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
            (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)
        ],
        'tableau20blind' : [
            (0, 107, 164), (255, 128, 14), (171, 171, 171), (89, 89, 89),
            (95, 158, 209), (200, 82, 0), (137, 137, 137), (163, 200, 236),
            (255, 188, 121), (207, 207, 207)
        ]
    }
    #normalise the colours for the colour lists
    for rgb_list in colour_lists:
        colour_array = np.array(colour_lists[rgb_list])/255.
        colour_array_tup = tuple(map(tuple,colour_array))
        colour_lists[rgb_list] = colour_array_tup
        #add normalised colours to output dictionary
        colour_lists_dict[rgb_list] = colour_lists[rgb_list]
    # extract the tableau20 lists, join together
    t20 = list(colour_lists_dict["tableau20"] + colour_lists_dict["tableau20blind"])
    # extend the list in case someone need a long list of colours (after 30 colours, they will be redundant!)
    t20 = t20 + t20 + t20 + t20 + t20
    return t20

def reindex_df_so_selected_cols_are_first(df, selected_cols, raise_error=True):
    """ Reindex DataFrame so that selected columns are displayed first (on the left)

    Parameters
    ----------
    df : pandas DataFrame
        Pandas 2D DataFrame with unique columns
    selected_cols : list
        List of strings representing the column names to place first
    raise_error : bool
        If True, raise ValueError for a selected column missing from df;
        if False, only print the error message and continue.

    Returns
    -------
    df : pandas DataFrame
        Original DataFrame with altered column order
    """
    # convert columns to list
    col_list_orig = list(df.columns)
    # remove the list_cols_to_place_first from the original columns
    for col in selected_cols:
        if col in col_list_orig:
            col_list_orig.remove(col)
        else:
            if raise_error == True:
                raise ValueError("\n\nError, reindex_df_so_selected_cols_are_first, '%s' not in columns" % col)
            else:
                print("\n\nError, reindex_df_so_selected_cols_are_first, '%s' not in columns" % col)
    # join to create desired list of columns, and reindex the dataframe
    col_list_final = selected_cols + col_list_orig
    return df.reindex(columns = col_list_final)

def convert_listlike_cols_to_str(df, list_cols, convert_nan=False, nanreplacement = "[]"):
    """ Convert listlike values in pandas DataFrame to stringlists.

    Writing a DataFrame to excel and csv raises errors due to presence of
    listlike or arraylike data. Here, all listlike and arraylike data is
    converted to a "stringlist".

    Parameters
    ----------
    df : pandas DataFrame
        Pandas DataFrame where some values are listlike or arraylike
    list_cols : list
        List of columns that contain listlike/arraylike should be converted to stringlists
    convert_nan : bool
        Whether np.nan values will be converted
    nanreplacement : string
        String to replace np.nan with. Default is a string representing an empty list. "[]"

    Returns
    ----------
    df : pandas DataFrame
        DataFrame with listlike and arraylike converted to stringlists

    Note:
    ----------
    # Convert individual stringlist back to a numpy array as follows
    np.array(ast.literal_eval(stringlist))
    # convert a column of stringlists back to arrays as follows
    df.loc[:,"x"] = df.loc[:,"x"].apply(lambda x : np.array(ast.literal_eval(x)))
    """
    for col in list_cols:
        if col in df:
            # convert each np.array to a list, and then a stringlist
            series = df[col].dropna()
            # check if the series is empty (no data)
            if series.empty == False:
                # NOTE(review): series[0] is a LABEL-based lookup; it assumes
                # a row labelled 0 survives dropna(). A positional .iloc[0]
                # may be what was intended — TODO confirm with real data.
                example_data1 = series[0]
                # if the datatype is a numpy array or pandas series, convert to a list
                if "ndarray" in str(type(example_data1)) or "Series" in str(type(example_data1)):
                    df[col] = series.apply(lambda x: list(x))
                example_data2 = df[col].dropna()[0]
                # check that the first nonnan datapoint is now a list
                if "list" in str(type(example_data2)):
                    df[col] = df[col].dropna().apply(lambda x: str(x))
                else:
                    raise TypeError("datatype for col {a}, ({b}) is not listlike".format(a=col, b=str(type(example_data2))))
                # if desired, convert np.nan to empty stringlists
                if convert_nan == True:
                    df[col] = df[col].fillna(nanreplacement)
            else:
                # do nothing. There is no listlike data in the column, because all values are empty
                pass
        else:
            raise KeyError("The column {} is not in the dataframe".format(col))
    return df

class DatafileError(Exception):
    """ Custom error, to be used when there is a problem with the datafiles
    """
    pass

class DataMismatchError(Exception):
    """ Custom error, to be used when there is a mismatch in the data somehow,
    such as an unequal number of dose and response concentrations,
    or the Contains_Data columns/rows do not match up.
    """
    pass

def create_dict_organising_subplots(n_plots_per_fig,n_rows):
    '''
    Function to help organise the creation of figures that contain multiple plots.

    Parameters
    ----------
    n_plots_per_fig : int
        Number of plots per figure in total
    n_rows : int
        Number of rows of plots per figure

    Notes
    -----
    For example, 15 histograms printed in figures with 8 histograms per figure/page.

    Returns a dict that gives a tuple for each plot/graph.
    newfig, savefig, fig_nr, plot_nr_in_fig, r, c
    r and c are used to index pyplot subplots as follows
        fig, axarr = plt.subplots(2,2)
        plotcontainer = axarr[r,c].plot(x, y)
    '''
    dict_organising_subplots = {}
    #figure number
    fig_nr = 0
    #plot number in figure
    plot_nr_in_fig = 0
    #row number in figure
    r = 0
    #column number in figure
    c = 0
    #whether the figure needs to be saved
    savefig = False
    #whether a new figure needs to be created
    newfig = True

    for plotnr in range(1, 500):
        #add current counters to dict
        dict_organising_subplots[plotnr] = (newfig, savefig, fig_nr,
                                            plot_nr_in_fig, r, c)
        plot_nr_in_fig += 1
        r += 1
        newfig = False
        savefig = False
        #if plot_nr_in_fig is the last one before the new figure, then savefig = True
        # NOTE(review): this condition fires on EVERY multiple of
        # (n_plots_per_fig - 1), not only on the last plot of a figure —
        # looks suspicious for n_plots_per_fig > 2; confirm intended.
        if plot_nr_in_fig % (n_plots_per_fig - 1) == 0 and plot_nr_in_fig != 0:
            savefig = True
        #if plot_nr_in_fig is in a multiple of n_rows, then the plot goes to the second column
        if plot_nr_in_fig % n_rows == 0 and plot_nr_in_fig != 0:
            c += 1
            r = 0
        #if the plotnr is in a multiple of n_plots_per_fig, then a new figure needs to be created, and everything else reset
        if plotnr % n_plots_per_fig == 0 and plotnr != 0:
            #go to second figure
            fig_nr += 1
            #reset values
            plot_nr_in_fig = 0
            r = 0
            c = 0
            newfig = True
    return dict_organising_subplots

def convert_truelike_to_bool(input_item, convert_float=False, convert_nontrue=True):
    """Converts true-like values ("true", 1, True", "WAHR", etc) to python boolean True.

    Parameters
    ----------
    input_item : string or int
        Item to be converted to bool (e.g. "true", 1, "WAHR" or the equivalent in several languagues)
    convert_float: bool
        Convert floats to bool. If True, "1.0" will be converted to True
    convert_nontrue : bool
        If True, the output for input_item not recognised as "True" will be False.
        If False, the output for input_item not recognised as "True" will be the original input_item.

    Returns
    -------
    return_value : True, or input_item
        If input_item is True-like, returns python bool True. Otherwise, returns the input_item.

    Usage
    -----
    # convert a single value or string
    convert_truelike_to_bool("true")
    # convert a column in a pandas DataFrame
    df["column_name"] = df["column_name"].apply(convert_truelike_to_bool)
    """
    list_True_items = [True, 'True', "true","TRUE","T","t",'wahr', 'WAHR', 'prawdziwy', 'verdadeiro', 'sann', 'istinit',
                       'veritable', 'Pravda', 'sandt', 'vrai', 'igaz', 'veru', 'verdadero', 'sant', 'gwir',
                       'PRAWDZIWY', 'VERDADEIRO', 'SANN', 'ISTINIT', 'VERITABLE', 'PRAVDA', 'SANDT', 'VRAI',
                       'IGAZ', 'VERU', 'VERDADERO', 'SANT', 'GWIR', 'bloody oath', 'BLOODY OATH', 'nu', 'NU',
                       'damn right','DAMN RIGHT']

    # if you want to accept 1.0 as a true value, add it to the list
    if convert_float:
        list_True_items += [1.0,"1.0"]
    # check if the user input string is in the list_True_items
    input_item_is_true = input_item in list_True_items
    # if you want to convert non-True values to "False", then nontrue_return_value = False
    if convert_nontrue:
        nontrue_return_value = False
    else:
        # otherwise, for strings not in the True list, the original string will be returned
        nontrue_return_value = input_item
    # return True if the input item is in the list. If not, return either False, or the original input_item
    return_value = input_item_is_true if input_item_is_true == True else nontrue_return_value

    return return_value

def convert_nonelike_to_none(input_item):
    """Converts None-like values ("none", "NULL", None, etc) to the uniform string "None".

    Note, the output is NOT the python None, but a string.

    Parameters
    ----------
    input_item : string or int
        Item to be converted to None (e.g. "none", "NULL" or the equivalent in several languagues)

    Returns
    -------
    return_value : string
        If input_item is None-like, returns python string "None". Otherwise, returns the input_item.

    Usage
    -------
    # convert a single value or string
    convert_nonelike_to_none("none")
    # convert a column in a pandas DataFrame
    df["column_name"] = df["column_name"].apply(convert_nonelike_to_none)
    """
    list_None_items = [None, "none","NONE","null","NULL",'Nijedna', 'Cap', 'Niti', 'Ingen', 'Geen', 'Aucun', 'Keine',
                       'Okenn', 'Egyik', 'Tidak', 'Nessuno', 'Hakuna', 'pagh', 'Neviens', 'Tiada', 'L-eda', 'Mix',
                       'Ingen', 'Ninguno', 'Brak', 'Nenhum', 'Nici', 'Niko', 'Nobena', 'Ninguno', 'Ingen', 'Dim',
                       'NIJEDNA', 'CAP', 'NITI', 'INGEN', 'GEEN', 'AUCUN', 'KEINE', 'OKENN', 'EGYIK', 'TIDAK',
                       'NESSUNO', 'HAKUNA', 'PAGH', 'NEVIENS', 'TIADA', 'L-EDA', 'MIX', 'INGEN', 'NINGUNO', 'BRAK',
                       'NENHUM', 'NICI', 'NIKO', 'NOBENA', 'NINGUNO', 'INGEN', 'DIM']

    # determine if input_item is in the list
    input_item_is_None = input_item in list_None_items
    # define the return_value as either the string "None" or the original input item
    return_value = "None" if input_item_is_None == True else input_item

    return return_value

def format_cols_2digit(df, skip_last_col=True):
    """Formats a dataframes columns so that numbers are always two-digits (padded with 0)

    Parameters
    ----------
    df : pandas DataFrame
        Input DataFrame.
    skip_last_col : bool
        A special case, where the very last column contains text, rather than a number, and should be excluded.

    Returns
    -------
    reformatted_cols : list
        The DataFrame columns, reformatted with padded 0 to make 2 digits.
    """
    if skip_last_col:
        # reformat the columns to be padded stringnumbers. (exclude the last "Contains_Data")
        reformatted_cols = ["%02d" % col for col in df.columns[:-1]]
        # add last column back to list
        reformatted_cols.append(df.columns[-1])
    else:
        # reformat the columns to be padded stringnumbers. (exclude the last "Contains_Data")
        reformatted_cols = ["%02d" % col for col in df.columns]
    return reformatted_cols

def assert_df_contains_no_nan_values(df: pd.DataFrame):
    # Fail fast with a list of every (row, column) position that is empty, so
    # the user can fix the settings excel file in one pass.
    df_contains_nan = df.isnull().any().any()
    if df_contains_nan:
        positions_with_nan: List[str] = []
        for column_name in df.columns:
            for index_name in df.index:
                value = df.at[index_name, column_name]
                if pd.isnull(value):
                    position_string = f"(row='{index_name}', column='{column_name}')"
                    positions_with_nan.append(position_string)
        all_positions_str = ", and ".join(positions_with_nan)
        raise ValueError(f"\n\nThe 'files' tab of the excel settings appears to contain empty cells at {all_positions_str}. "
                         "Please delete any partially filled rows. "
                         "Fill empty cells with the text 'None' if necessary.")

def get_eccpy_module_path()-> Path:
    # Parent of the directory that contains the eccpy package, i.e. the
    # installation root (two levels above eccpy/__init__.py).
    eccpy_module_path = Path(os.path.abspath(eccpy.__file__)).parents[1]
    #eccpy_module_path = Path(__file__).parents[1]
    return eccpy_module_path
{ "content_hash": "081507edb7bd10e4c15c785f368a9405", "timestamp": "", "source": "github", "line_count": 603, "max_line_length": 125, "avg_line_length": 41.47097844112769, "alnum_prop": 0.6306634142440116, "repo_name": "teese/eccpy", "id": "d69d783cb2b71a55adfe8c2e0b123899bfafd529", "size": "25007", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "eccpy/tools.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "236637" } ], "symlink_target": "" }
"""Parses a torrent file."""
import io
import logging
import os

import six

from seedbox.torrent import bencode

LOG = logging.getLogger(__name__)


class ParsingError(Exception):
    """Holds parsing error messages.

    Error class representing errors that occur while parsing the
    torrent content.
    """

    def __init__(self, error_msg):
        Exception.__init__(self)
        self.error_msg = error_msg

    def __str__(self):
        return repr(self.error_msg)


# Bencode structural tokens: dictionaries, lists, integers, the shared
# end marker, negative-number sign, and the <length>:<string> separator.
DICT_TOKEN = 'd'
LIST_TOKEN = 'l'
INT_TOKEN = 'i'
END_TOKEN = 'e'
NEGATIVE = '-'
STR_SEP_TOKEN = ':'


class Bdecode(object):
    """Fallback bencode decoder that walks an in-memory byte stream.

    Used by :class:`TorrentParser` when the stricter ``bencode`` library
    rejects the file (see ``TorrentParser.load_content``).
    """

    def __init__(self, data):
        # Wrap the raw torrent bytes so we can seek/peek char by char.
        self.data = six.BytesIO(data)

    def _next_char(self):
        # Decode with 'replace' so undecodable bytes never raise here;
        # they simply fail the token comparisons below.
        return self.data.read(1).decode('utf-8', 'replace')

    def _prev_char(self):
        # Rewind one byte.
        # offset: -1
        # mode/whence: SEEK_CUR => 1
        self.data.seek(-1, 1)

    def _parse_str(self):
        # The caller already consumed the first digit of the length, so
        # step back one byte before reading the full '<length>:' prefix.
        self._prev_char()
        str_len = self._parse_number(delimiter=STR_SEP_TOKEN)
        if not str_len:
            # A zero/empty length is treated as malformed input.
            raise ParsingError(
                'Empty string length found while parsing at position %d'
                % self.data.tell())
        return self.data.read(str_len)

    def _parse_int(self):
        # Integers are encoded as 'i<number>e'; the 'i' was consumed.
        return self._parse_number(delimiter=END_TOKEN)

    def _parse_number(self, delimiter):
        """Accumulate digits (and a leading '-') until *delimiter*."""
        parsed_int = ''
        while True:
            parsed_int_char = self._next_char()
            if parsed_int_char != NEGATIVE and not parsed_int_char.isdigit():
                if parsed_int_char != delimiter:
                    raise ParsingError(
                        'Invalid character %s found after parsing an '
                        'integer (%s expected) at position %d.'
                        % (parsed_int_char, delimiter, self.data.tell()))
                else:
                    break
            parsed_int += parsed_int_char
        return int(parsed_int)

    def _parse_dict(self):
        parsed_dict = {}
        while True:
            dict_key = self.decode()
            # decode() returns None when it hits END_TOKEN.
            # NOTE(review): any falsy key (e.g. b'') would also end the
            # dict here -- confirm that cannot occur in valid torrents.
            if not dict_key:
                # End of dict
                break
            # parse value
            dict_value = self.decode()
            if isinstance(dict_value, six.binary_type):
                dict_value = dict_value.decode('utf-8', 'replace')
            parsed_dict.setdefault(dict_key.decode('utf-8'), dict_value)
        return parsed_dict

    def _parse_list(self):
        parsed_list = []
        while True:
            list_item = self.decode()
            # NOTE(review): a falsy element (integer 0, empty string,
            # empty list) terminates the list early, not just END_TOKEN;
            # looks like a latent truncation bug -- verify against the
            # bencode spec before relying on list contents.
            if not list_item:
                # End of list
                break
            if isinstance(list_item, six.binary_type):
                list_item = list_item.decode('utf-8', 'replace')
            parsed_list.append(list_item)
        return parsed_list

    def decode(self):
        """Decode torrent content.

        :returns: parsed content

        Returns ``None`` both for END_TOKEN and (implicitly) for any
        unrecognized leading character.
        """
        parsed_char = self._next_char()
        if parsed_char == END_TOKEN:
            return None
        elif parsed_char == INT_TOKEN:
            return self._parse_int()
        elif parsed_char.isdigit():
            return self._parse_str()
        elif parsed_char == DICT_TOKEN:
            return self._parse_dict()
        elif parsed_char == LIST_TOKEN:
            return self._parse_list()

    @classmethod
    def parse(cls, data):
        """Helper method that creates decoder and decodes content.

        :returns: parsed content
        """
        return cls(data).decode()


class TorrentParser(object):
    """Reads a .torrent file and exposes its decoded metadata."""

    def __init__(self, filepath):
        """Reads the torrent file and parses content.

        :param str filepath: Path to the torrent file to be parsed
        :raises IOError: when a file does not exists
        """
        if not os.path.exists(filepath):
            raise IOError('No file found at %s' % filepath)
        self.file = filepath
        self._content = None

    @property
    def content(self):
        # Lazily decode the file on first access and cache the result.
        if self._content is None:
            self._content = self.load_content()
        return self._content

    def load_content(self):
        """Reads the torrent file and decodes content.

        .. note::
            bencode is supremely more efficient parser for torrents but
            extremely strict in the format of the file. A custom parser
            based on another implementation handles parsing that is
            more flexible but it is not as efficient. Therefore when
            the file is well formed bencode is used but if it fails
            then the custom parser is used. If the custom parser fails
            then a ParsingError is raised.
        """
        with io.open(file=self.file, mode='rb') as handle:
            content = handle.read()
        try:
            return bencode.bdecode(content)
        except bencode.BTFailure as bterr:
            LOG.info('bencode.bdecode failed: (%s); trying alternate approach',
                     bterr)
            return Bdecode.parse(content)

    def get_file_details(self):
        """Retrieves details of the file(s) contained in the torrent.

        File details tuple:

            * name
            * length (size)

        :returns: file details embedded within torrent
        :rtype: list of tuples (name, length)
        """
        parsed_files_info = []
        files_info = self.content.get(b'info')
        if not files_info:
            return parsed_files_info
        multiple_files_info = files_info.get(b'files')
        LOG.debug('files: %s', multiple_files_info)
        if multiple_files_info:
            # the name attribute was holding the directory name that each
            # of the multiple files were contained within.
            dir_name = files_info.get(b'name').decode('utf-8')
            LOG.debug('dirname: %s', dir_name)
            for file_info in multiple_files_info:
                LOG.debug('file_info: %s', file_info)
                # simply append the directory to the concatenated list
                # of items under path, mostly it is a single item.
                parsed_files_info.append(
                    (os.path.join(dir_name, os.path.sep.join(
                        [x.decode('utf-8') for x in file_info.get(b'path')])),
                     file_info.get(b'length')))
        else:
            parsed_files_info.append(
                (files_info.get(b'name').decode('utf-8'),
                 files_info.get(b'length')))
        return parsed_files_info
{ "content_hash": "a6bbe2c5615c02bbce74db4b14616f45", "timestamp": "", "source": "github", "line_count": 220, "max_line_length": 79, "avg_line_length": 29.80909090909091, "alnum_prop": 0.5439158279963403, "repo_name": "shad7/seedbox", "id": "428d32e14c59909b10e59bb25a9677e70094949f", "size": "6558", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "seedbox/torrent/parser.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "210321" } ], "symlink_target": "" }
""" Audio Spectrogram Transformer (AST) model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the AST
    [MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        patch_size (`int`, *optional*, defaults to `16`):
            The size (resolution) of each patch.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        frequency_stride (`int`, *optional*, defaults to 10):
            Frequency stride to use when patchifying the spectrograms.
        time_stride (`int`, *optional*, defaults to 10):
            Temporal stride to use when patchifying the spectrograms.
        max_length (`int`, *optional*, defaults to 1024):
            Temporal dimension of the spectrograms.
        num_mel_bins (`int`, *optional*, defaults to 128):
            Frequency dimension of the spectrograms (number of Mel-frequency bins).

    Example:

    ```python
    >>> from transformers import ASTConfig, ASTModel

    >>> # Initializing a AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
    >>> configuration = ASTConfig()

    >>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
    >>> model = ASTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
{ "content_hash": "2d14742fc1b3e32fb4a543e08f7dac18", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 121, "avg_line_length": 44.723214285714285, "alnum_prop": 0.6586144939109603, "repo_name": "huggingface/transformers", "id": "19f85189ad0dbd0be1f35ca11595230a6ed79d21", "size": "5649", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "6021" }, { "name": "C++", "bytes": "12959" }, { "name": "Cuda", "bytes": "175419" }, { "name": "Dockerfile", "bytes": "18218" }, { "name": "Jsonnet", "bytes": "937" }, { "name": "Makefile", "bytes": "3430" }, { "name": "Python", "bytes": "35742012" }, { "name": "Shell", "bytes": "30374" } ], "symlink_target": "" }
# Atomically increment the like counter of one embedded comment on the post
# identified by ``permalink``; ``comment_ordinal`` is a string index into the
# post's ``comments`` array (e.g. "3" -> field path "comments.3.num_likes").
# NOTE(review): legacy ``Collection.update`` call style -- modern pymongo
# removed it in favor of ``update_one``; confirm the driver version in use.
posts.update({'permalink':permalink}, {'$inc': {'comments.' + comment_ordinal + '.num_likes': 1}});
{ "content_hash": "146f5e0703f20649ad3d29c90c70ae43", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 99, "avg_line_length": 99, "alnum_prop": 0.6363636363636364, "repo_name": "hemmerling/nosql-mongodb2013", "id": "c2e45f2f13f6111d01e948c4be22ec75f87a2c57", "size": "139", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/m101j/final/final-4/hemmerling_final4.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "12956" }, { "name": "Java", "bytes": "196658" }, { "name": "JavaScript", "bytes": "143174" }, { "name": "Python", "bytes": "468885" }, { "name": "Shell", "bytes": "6505" }, { "name": "Smarty", "bytes": "24649" } ], "symlink_target": "" }
from google.appengine.ext import ndb
import datetime
import sys
import logging


def put(json):
    """Create and persist a datastore entity described by the ``json`` dict.

    Expects ``json['kind']`` plus a ``json['fields']`` list of
    ``{'field': ..., 'value': ...[, 'type': ...]}`` mappings; an optional
    ``json['ancestor']`` dict becomes the entity's parent key.
    """
    kind = json['kind']
    if 'ancestor' in json:
        json_parent = json['ancestor']
        parent_key = create_key(json_parent)
        p = create_generic_model_with_parent(kind, parent_key)
    else:
        p = create_generic_model(kind)
    for field in json['fields']:
        if 'type' in field:
            # Typed values are converted first (currently only 'date').
            value = from_filter_type(field['value'], field['type'])
            setattr(p, field['field'], value)
        else:
            setattr(p, field['field'], field['value'])
    p.put()


def create_generic_model(kind):
    """Return an ndb.Expando instance whose datastore kind is *kind*."""
    class GenericModel(ndb.Expando):
        @classmethod
        def _get_kind(cls):
            return kind
    return GenericModel()


def create_generic_model_with_parent(kind, parent_key):
    """Return an ndb.Expando instance of kind *kind* parented at *parent_key*."""
    class GenericModel(ndb.Expando):
        @classmethod
        def _get_kind(cls):
            return kind
    return GenericModel(parent=parent_key)


def create_key(json):
    """Build an ndb.Key from a ``{'kind': ..., 'id'|'name': ...}`` dict."""
    kind = json['kind']
    identifier = __get_identifier_to_key(json)
    return ndb.Key(flat=[kind, identifier])


def __get_identifier_to_key(json):
    # 'name' wins over 'id' when both are present (checked second).
    identifier = None
    if 'id' in json:
        identifier = json['id']
    if 'name' in json:
        identifier = json['name']
    return identifier


def from_filter_type(value, field_type):
    """Convert an incoming JSON value based on its declared *field_type*."""
    options = {'date': __long_to_date}
    # 'key': get_key}
    if field_type in options:
        return options[field_type](value)
    else:
        return value


def to_filter_type(value):
    """Convert a datastore value to its JSON-friendly representation."""
    options = {'date': __date_to_long, 'key': __model_to_json}
    field_type = __get_field_type(value)
    if field_type in options:
        return options[field_type](value)
    else:
        return value


def __get_field_type(value):
    # NOTE(review): ``type(value) is ndb.Expando`` matches only Expando
    # itself, not subclasses such as the GenericModel classes above --
    # confirm the 'key' branch can ever fire.
    if type(value) is datetime.datetime:
        return 'date'
    elif type(value) is ndb.Expando:
        return 'key'
    else:
        return None


def __long_to_date(value):
    # Epoch milliseconds -> naive local-time datetime.
    return datetime.datetime.fromtimestamp(value/1000)


def __model_to_json(value):
    return value._to_dict()['kind']


def __date_to_long(value):
    return __unix_time_millis(value)


def __unix_time(dt):
    # Seconds since the Unix epoch (assumes *dt* is naive UTC -- TODO confirm).
    epoch = datetime.datetime.utcfromtimestamp(0)
    delta = dt - epoch
    return delta.total_seconds()


def __unix_time_millis(dt):
    return __unix_time(dt) * 1000.0
{ "content_hash": "5c19b58e1e214846028cc2c142d833f0", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 67, "avg_line_length": 21.88888888888889, "alnum_prop": 0.6095600676818951, "repo_name": "filipesimoes/ds-api", "id": "8b83527f86bcbd617a6ee665a10d8064e0ca8730", "size": "2364", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/controllers/entity.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "693" }, { "name": "HTML", "bytes": "18328" }, { "name": "JavaScript", "bytes": "15444" }, { "name": "Python", "bytes": "147561" }, { "name": "Shell", "bytes": "98" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function, unicode_literals

from pants.java.distribution.distribution import DistributionLocator
from pants_test.subsystem.subsystem_util import init_subsystem


def is_missing_jvm(version):
  """Return True when no JVM in the range [version, version.9999] can be located.

  :param string version: major JVM version to probe for, e.g. '1.8'.
  """
  init_subsystem(DistributionLocator)
  max_version = '{}.9999'.format(version)
  try:
    DistributionLocator.cached(minimum_version=version, maximum_version=max_version)
  except DistributionLocator.Error:
    return True
  return False
{ "content_hash": "a077e89e70c5c6b394640aa9d836e7a3", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 98, "avg_line_length": 35.38461538461539, "alnum_prop": 0.7978260869565217, "repo_name": "twitter/pants", "id": "8fce342fd2357c6622a7690e3891f21905b07eb4", "size": "607", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/python/pants_test/backend/jvm/tasks/missing_jvm_check.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "655" }, { "name": "C++", "bytes": "2010" }, { "name": "CSS", "bytes": "9444" }, { "name": "Dockerfile", "bytes": "5639" }, { "name": "GAP", "bytes": "1283" }, { "name": "Gherkin", "bytes": "919" }, { "name": "Go", "bytes": "2765" }, { "name": "HTML", "bytes": "85294" }, { "name": "Java", "bytes": "498956" }, { "name": "JavaScript", "bytes": "22906" }, { "name": "Python", "bytes": "6700799" }, { "name": "Rust", "bytes": "765598" }, { "name": "Scala", "bytes": "89346" }, { "name": "Shell", "bytes": "94395" }, { "name": "Thrift", "bytes": "2953" } ], "symlink_target": "" }
from __future__ import absolute_import, print_function, division import struct import sys import construct import six import netlib.exceptions from mitmproxy import exceptions from mitmproxy.contrib.tls import _constructs from mitmproxy.protocol import base from netlib import utils # taken from https://testssl.sh/openssl-rfc.mappping.html CIPHER_ID_NAME_MAP = { 0x00: 'NULL-MD5', 0x01: 'NULL-MD5', 0x02: 'NULL-SHA', 0x03: 'EXP-RC4-MD5', 0x04: 'RC4-MD5', 0x05: 'RC4-SHA', 0x06: 'EXP-RC2-CBC-MD5', 0x07: 'IDEA-CBC-SHA', 0x08: 'EXP-DES-CBC-SHA', 0x09: 'DES-CBC-SHA', 0x0a: 'DES-CBC3-SHA', 0x0b: 'EXP-DH-DSS-DES-CBC-SHA', 0x0c: 'DH-DSS-DES-CBC-SHA', 0x0d: 'DH-DSS-DES-CBC3-SHA', 0x0e: 'EXP-DH-RSA-DES-CBC-SHA', 0x0f: 'DH-RSA-DES-CBC-SHA', 0x10: 'DH-RSA-DES-CBC3-SHA', 0x11: 'EXP-EDH-DSS-DES-CBC-SHA', 0x12: 'EDH-DSS-DES-CBC-SHA', 0x13: 'EDH-DSS-DES-CBC3-SHA', 0x14: 'EXP-EDH-RSA-DES-CBC-SHA', 0x15: 'EDH-RSA-DES-CBC-SHA', 0x16: 'EDH-RSA-DES-CBC3-SHA', 0x17: 'EXP-ADH-RC4-MD5', 0x18: 'ADH-RC4-MD5', 0x19: 'EXP-ADH-DES-CBC-SHA', 0x1a: 'ADH-DES-CBC-SHA', 0x1b: 'ADH-DES-CBC3-SHA', # 0x1c: , # 0x1d: , 0x1e: 'KRB5-DES-CBC-SHA', 0x1f: 'KRB5-DES-CBC3-SHA', 0x20: 'KRB5-RC4-SHA', 0x21: 'KRB5-IDEA-CBC-SHA', 0x22: 'KRB5-DES-CBC-MD5', 0x23: 'KRB5-DES-CBC3-MD5', 0x24: 'KRB5-RC4-MD5', 0x25: 'KRB5-IDEA-CBC-MD5', 0x26: 'EXP-KRB5-DES-CBC-SHA', 0x27: 'EXP-KRB5-RC2-CBC-SHA', 0x28: 'EXP-KRB5-RC4-SHA', 0x29: 'EXP-KRB5-DES-CBC-MD5', 0x2a: 'EXP-KRB5-RC2-CBC-MD5', 0x2b: 'EXP-KRB5-RC4-MD5', 0x2f: 'AES128-SHA', 0x30: 'DH-DSS-AES128-SHA', 0x31: 'DH-RSA-AES128-SHA', 0x32: 'DHE-DSS-AES128-SHA', 0x33: 'DHE-RSA-AES128-SHA', 0x34: 'ADH-AES128-SHA', 0x35: 'AES256-SHA', 0x36: 'DH-DSS-AES256-SHA', 0x37: 'DH-RSA-AES256-SHA', 0x38: 'DHE-DSS-AES256-SHA', 0x39: 'DHE-RSA-AES256-SHA', 0x3a: 'ADH-AES256-SHA', 0x3b: 'NULL-SHA256', 0x3c: 'AES128-SHA256', 0x3d: 'AES256-SHA256', 0x3e: 'DH-DSS-AES128-SHA256', 0x3f: 'DH-RSA-AES128-SHA256', 0x40: 'DHE-DSS-AES128-SHA256', 0x41: 'CAMELLIA128-SHA', 0x42: 'DH-DSS-CAMELLIA128-SHA', 
0x43: 'DH-RSA-CAMELLIA128-SHA', 0x44: 'DHE-DSS-CAMELLIA128-SHA', 0x45: 'DHE-RSA-CAMELLIA128-SHA', 0x46: 'ADH-CAMELLIA128-SHA', 0x62: 'EXP1024-DES-CBC-SHA', 0x63: 'EXP1024-DHE-DSS-DES-CBC-SHA', 0x64: 'EXP1024-RC4-SHA', 0x65: 'EXP1024-DHE-DSS-RC4-SHA', 0x66: 'DHE-DSS-RC4-SHA', 0x67: 'DHE-RSA-AES128-SHA256', 0x68: 'DH-DSS-AES256-SHA256', 0x69: 'DH-RSA-AES256-SHA256', 0x6a: 'DHE-DSS-AES256-SHA256', 0x6b: 'DHE-RSA-AES256-SHA256', 0x6c: 'ADH-AES128-SHA256', 0x6d: 'ADH-AES256-SHA256', 0x80: 'GOST94-GOST89-GOST89', 0x81: 'GOST2001-GOST89-GOST89', 0x82: 'GOST94-NULL-GOST94', 0x83: 'GOST2001-GOST89-GOST89', 0x84: 'CAMELLIA256-SHA', 0x85: 'DH-DSS-CAMELLIA256-SHA', 0x86: 'DH-RSA-CAMELLIA256-SHA', 0x87: 'DHE-DSS-CAMELLIA256-SHA', 0x88: 'DHE-RSA-CAMELLIA256-SHA', 0x89: 'ADH-CAMELLIA256-SHA', 0x8a: 'PSK-RC4-SHA', 0x8b: 'PSK-3DES-EDE-CBC-SHA', 0x8c: 'PSK-AES128-CBC-SHA', 0x8d: 'PSK-AES256-CBC-SHA', # 0x8e: , # 0x8f: , # 0x90: , # 0x91: , # 0x92: , # 0x93: , # 0x94: , # 0x95: , 0x96: 'SEED-SHA', 0x97: 'DH-DSS-SEED-SHA', 0x98: 'DH-RSA-SEED-SHA', 0x99: 'DHE-DSS-SEED-SHA', 0x9a: 'DHE-RSA-SEED-SHA', 0x9b: 'ADH-SEED-SHA', 0x9c: 'AES128-GCM-SHA256', 0x9d: 'AES256-GCM-SHA384', 0x9e: 'DHE-RSA-AES128-GCM-SHA256', 0x9f: 'DHE-RSA-AES256-GCM-SHA384', 0xa0: 'DH-RSA-AES128-GCM-SHA256', 0xa1: 'DH-RSA-AES256-GCM-SHA384', 0xa2: 'DHE-DSS-AES128-GCM-SHA256', 0xa3: 'DHE-DSS-AES256-GCM-SHA384', 0xa4: 'DH-DSS-AES128-GCM-SHA256', 0xa5: 'DH-DSS-AES256-GCM-SHA384', 0xa6: 'ADH-AES128-GCM-SHA256', 0xa7: 'ADH-AES256-GCM-SHA384', 0x5600: 'TLS_FALLBACK_SCSV', 0xc001: 'ECDH-ECDSA-NULL-SHA', 0xc002: 'ECDH-ECDSA-RC4-SHA', 0xc003: 'ECDH-ECDSA-DES-CBC3-SHA', 0xc004: 'ECDH-ECDSA-AES128-SHA', 0xc005: 'ECDH-ECDSA-AES256-SHA', 0xc006: 'ECDHE-ECDSA-NULL-SHA', 0xc007: 'ECDHE-ECDSA-RC4-SHA', 0xc008: 'ECDHE-ECDSA-DES-CBC3-SHA', 0xc009: 'ECDHE-ECDSA-AES128-SHA', 0xc00a: 'ECDHE-ECDSA-AES256-SHA', 0xc00b: 'ECDH-RSA-NULL-SHA', 0xc00c: 'ECDH-RSA-RC4-SHA', 0xc00d: 'ECDH-RSA-DES-CBC3-SHA', 0xc00e: 'ECDH-RSA-AES128-SHA', 0xc00f: 
'ECDH-RSA-AES256-SHA', 0xc010: 'ECDHE-RSA-NULL-SHA', 0xc011: 'ECDHE-RSA-RC4-SHA', 0xc012: 'ECDHE-RSA-DES-CBC3-SHA', 0xc013: 'ECDHE-RSA-AES128-SHA', 0xc014: 'ECDHE-RSA-AES256-SHA', 0xc015: 'AECDH-NULL-SHA', 0xc016: 'AECDH-RC4-SHA', 0xc017: 'AECDH-DES-CBC3-SHA', 0xc018: 'AECDH-AES128-SHA', 0xc019: 'AECDH-AES256-SHA', 0xc01a: 'SRP-3DES-EDE-CBC-SHA', 0xc01b: 'SRP-RSA-3DES-EDE-CBC-SHA', 0xc01c: 'SRP-DSS-3DES-EDE-CBC-SHA', 0xc01d: 'SRP-AES-128-CBC-SHA', 0xc01e: 'SRP-RSA-AES-128-CBC-SHA', 0xc01f: 'SRP-DSS-AES-128-CBC-SHA', 0xc020: 'SRP-AES-256-CBC-SHA', 0xc021: 'SRP-RSA-AES-256-CBC-SHA', 0xc022: 'SRP-DSS-AES-256-CBC-SHA', 0xc023: 'ECDHE-ECDSA-AES128-SHA256', 0xc024: 'ECDHE-ECDSA-AES256-SHA384', 0xc025: 'ECDH-ECDSA-AES128-SHA256', 0xc026: 'ECDH-ECDSA-AES256-SHA384', 0xc027: 'ECDHE-RSA-AES128-SHA256', 0xc028: 'ECDHE-RSA-AES256-SHA384', 0xc029: 'ECDH-RSA-AES128-SHA256', 0xc02a: 'ECDH-RSA-AES256-SHA384', 0xc02b: 'ECDHE-ECDSA-AES128-GCM-SHA256', 0xc02c: 'ECDHE-ECDSA-AES256-GCM-SHA384', 0xc02d: 'ECDH-ECDSA-AES128-GCM-SHA256', 0xc02e: 'ECDH-ECDSA-AES256-GCM-SHA384', 0xc02f: 'ECDHE-RSA-AES128-GCM-SHA256', 0xc030: 'ECDHE-RSA-AES256-GCM-SHA384', 0xc031: 'ECDH-RSA-AES128-GCM-SHA256', 0xc032: 'ECDH-RSA-AES256-GCM-SHA384', 0xcc13: 'ECDHE-RSA-CHACHA20-POLY1305', 0xcc14: 'ECDHE-ECDSA-CHACHA20-POLY1305', 0xcc15: 'DHE-RSA-CHACHA20-POLY1305', 0xff00: 'GOST-MD5', 0xff01: 'GOST-GOST94', 0xff02: 'GOST-GOST89MAC', 0xff03: 'GOST-GOST89STREAM', 0x010080: 'RC4-MD5', 0x020080: 'EXP-RC4-MD5', 0x030080: 'RC2-CBC-MD5', 0x040080: 'EXP-RC2-CBC-MD5', 0x050080: 'IDEA-CBC-MD5', 0x060040: 'DES-CBC-MD5', 0x0700c0: 'DES-CBC3-MD5', 0x080080: 'RC4-64-MD5', } def is_tls_record_magic(d): """ Returns: True, if the passed bytes start with the TLS record magic bytes. False, otherwise. 
""" d = d[:3] # TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2 # http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello if six.PY2: return ( len(d) == 3 and d[0] == '\x16' and d[1] == '\x03' and d[2] in ('\x00', '\x01', '\x02', '\x03') ) else: return ( len(d) == 3 and d[0] == 0x16 and d[1] == 0x03 and 0x0 <= d[2] <= 0x03 ) def get_client_hello(client_conn): """ Peek into the socket and read all records that contain the initial client hello message. client_conn: The :py:class:`client connection <mitmproxy.models.ClientConnection>`. Returns: The raw handshake packet bytes, without TLS record header(s). """ client_hello = b"" client_hello_size = 1 offset = 0 while len(client_hello) < client_hello_size: record_header = client_conn.rfile.peek(offset + 5)[offset:] if not is_tls_record_magic(record_header) or len(record_header) != 5: raise exceptions.TlsProtocolException('Expected TLS record, got "%s" instead.' % record_header) record_size = struct.unpack("!H", record_header[3:])[0] + 5 record_body = client_conn.rfile.peek(offset + record_size)[offset + 5:] if len(record_body) != record_size - 5: raise exceptions.TlsProtocolException("Unexpected EOF in TLS handshake: %s" % record_body) client_hello += record_body offset += record_size client_hello_size = struct.unpack("!I", b'\x00' + client_hello[1:4])[0] + 4 return client_hello class TlsClientHello(object): def __init__(self, raw_client_hello): self._client_hello = _constructs.ClientHello.parse(raw_client_hello) def raw(self): return self._client_hello @property def cipher_suites(self): return self._client_hello.cipher_suites.cipher_suites @property def sni(self): for extension in self._client_hello.extensions: is_valid_sni_extension = ( extension.type == 0x00 and len(extension.server_names) == 1 and extension.server_names[0].type == 0 and utils.is_valid_host(extension.server_names[0].name) ) if is_valid_sni_extension: return extension.server_names[0].name.decode("idna") 
@property def alpn_protocols(self): for extension in self._client_hello.extensions: if extension.type == 0x10: return list(extension.alpn_protocols) @classmethod def from_client_conn(cls, client_conn): """ Peek into the connection, read the initial client hello and parse it to obtain ALPN values. client_conn: The :py:class:`client connection <mitmproxy.models.ClientConnection>`. Returns: :py:class:`client hello <mitmproxy.protocol.tls.TlsClientHello>`. """ try: raw_client_hello = get_client_hello(client_conn)[4:] # exclude handshake header. except exceptions.ProtocolException as e: raise exceptions.TlsProtocolException('Cannot read raw Client Hello: %s' % repr(e)) try: return cls(raw_client_hello) except construct.ConstructError as e: raise exceptions.TlsProtocolException( 'Cannot parse Client Hello: %s, Raw Client Hello: %s' % (repr(e), raw_client_hello.encode("hex")) ) def __repr__(self): return "TlsClientHello( sni: %s alpn_protocols: %s, cipher_suites: %s)" % \ (self.sni, self.alpn_protocols, self.cipher_suites) class TlsLayer(base.Layer): """ The TLS layer implements transparent TLS connections. It exposes the following API to child layers: - :py:meth:`set_server_tls` to modify TLS settings for the server connection. - :py:attr:`server_tls`, :py:attr:`server_sni` as read-only attributes describing the current TLS settings for the server connection. """ def __init__(self, ctx, client_tls, server_tls): super(TlsLayer, self).__init__(ctx) self._client_tls = client_tls self._server_tls = server_tls self._custom_server_sni = None self._client_hello = None # type: TlsClientHello def __call__(self): """ The strategy for establishing TLS is as follows: First, we determine whether we need the server cert to establish ssl with the client. If so, we first connect to the server and then to the client. If not, we only connect to the client and do the server handshake lazily. 
An additional complexity is that we need to mirror SNI and ALPN from the client when connecting to the server. We manually peek into the connection and parse the ClientHello message to obtain these values. """ if self._client_tls: # Peek into the connection, read the initial client hello and parse it to obtain SNI and ALPN values. try: self._client_hello = TlsClientHello.from_client_conn(self.client_conn) except exceptions.TlsProtocolException as e: self.log("Cannot parse Client Hello: %s" % repr(e), "error") # Do we need to do a server handshake now? # There are two reasons why we would want to establish TLS with the server now: # 1. If we already have an existing server connection and server_tls is True, # we need to establish TLS now because .connect() will not be called anymore. # 2. We may need information from the server connection for the client handshake. # # A couple of factors influence (2): # 2.1 There actually is (or will be) a TLS-enabled upstream connection # 2.2 An upstream connection is not wanted by the user if --no-upstream-cert is passed. # 2.3 An upstream connection is implied by add_upstream_certs_to_client_chain # 2.4 The client wants to negotiate an alternative protocol in its handshake, we need to find out # what is supported by the server # 2.5 The client did not sent a SNI value, we don't know the certificate subject. 
client_tls_requires_server_connection = ( self._server_tls and not self.config.options.no_upstream_cert and ( self.config.options.add_upstream_certs_to_client_chain or self._client_hello.alpn_protocols or not self._client_hello.sni ) ) establish_server_tls_now = ( (self.server_conn and self._server_tls) or client_tls_requires_server_connection ) if self._client_tls and establish_server_tls_now: self._establish_tls_with_client_and_server() elif self._client_tls: self._establish_tls_with_client() elif establish_server_tls_now: self._establish_tls_with_server() layer = self.ctx.next_layer(self) layer() def __repr__(self): # pragma: no cover if self._client_tls and self._server_tls: return "TlsLayer(client and server)" elif self._client_tls: return "TlsLayer(client)" elif self._server_tls: return "TlsLayer(server)" else: return "TlsLayer(inactive)" def connect(self): if not self.server_conn: self.ctx.connect() if self._server_tls and not self.server_conn.tls_established: self._establish_tls_with_server() def set_server_tls(self, server_tls, sni=None): # type: (bool, Union[six.text_type, None, False]) -> None """ Set the TLS settings for the next server connection that will be established. This function will not alter an existing connection. Args: server_tls: Shall we establish TLS with the server? sni: ``str`` for a custom SNI value, ``None`` for the client SNI value, ``False`` if no SNI value should be sent. """ self._server_tls = server_tls self._custom_server_sni = sni @property def server_tls(self): """ ``True``, if the next server connection that will be established should be upgraded to TLS. """ return self._server_tls @property def server_sni(self): """ The Server Name Indication we want to send with the next server TLS handshake. 
""" if self._custom_server_sni is False: return None else: return self._custom_server_sni or self._client_hello.sni @property def alpn_for_client_connection(self): return self.server_conn.get_alpn_proto_negotiated() def __alpn_select_callback(self, conn_, options): # This gets triggered if we haven't established an upstream connection yet. default_alpn = b'http/1.1' # alpn_preference = b'h2' if self.alpn_for_client_connection in options: choice = bytes(self.alpn_for_client_connection) elif default_alpn in options: choice = bytes(default_alpn) else: choice = options[0] self.log("ALPN for client: %s" % choice, "debug") return choice def _establish_tls_with_client_and_server(self): try: self.ctx.connect() self._establish_tls_with_server() except Exception: # If establishing TLS with the server fails, we try to establish TLS with the client nonetheless # to send an error message over TLS. try: self._establish_tls_with_client() except: pass six.reraise(*sys.exc_info()) self._establish_tls_with_client() def _establish_tls_with_client(self): self.log("Establish TLS with client", "debug") cert, key, chain_file = self._find_cert() if self.config.options.add_upstream_certs_to_client_chain: extra_certs = self.server_conn.server_certs else: extra_certs = None try: self.client_conn.convert_to_ssl( cert, key, method=self.config.openssl_method_client, options=self.config.openssl_options_client, cipher_list=self.config.options.ciphers_client, dhparams=self.config.certstore.dhparams, chain_file=chain_file, alpn_select_callback=self.__alpn_select_callback, extra_chain_certs=extra_certs, ) # Some TLS clients will not fail the handshake, # but will immediately throw an "unexpected eof" error on the first read. # The reason for this might be difficult to find, so we try to peek here to see if it # raises ann error. 
self.client_conn.rfile.peek(1) except netlib.exceptions.TlsException as e: six.reraise( exceptions.ClientHandshakeException, exceptions.ClientHandshakeException( "Cannot establish TLS with client (sni: {sni}): {e}".format( sni=self._client_hello.sni, e=repr(e) ), self._client_hello.sni or repr(self.server_conn.address) ), sys.exc_info()[2] ) def _establish_tls_with_server(self): self.log("Establish TLS with server", "debug") try: # We only support http/1.1 and h2. # If the server only supports spdy (next to http/1.1), it may select that # and mitmproxy would enter TCP passthrough mode, which we want to avoid. def deprecated_http2_variant(x): return x.startswith(b"h2-") or x.startswith(b"spdy") if self._client_hello.alpn_protocols: alpn = [x for x in self._client_hello.alpn_protocols if not deprecated_http2_variant(x)] else: alpn = None if alpn and b"h2" in alpn and not self.config.options.http2: alpn.remove(b"h2") ciphers_server = self.config.options.ciphers_server if not ciphers_server: ciphers_server = [] for id in self._client_hello.cipher_suites: if id in CIPHER_ID_NAME_MAP.keys(): ciphers_server.append(CIPHER_ID_NAME_MAP[id]) ciphers_server = ':'.join(ciphers_server) self.server_conn.establish_ssl( self.config.clientcerts, self.server_sni, method=self.config.openssl_method_server, options=self.config.openssl_options_server, verify_options=self.config.openssl_verification_mode_server, ca_path=self.config.options.ssl_verify_upstream_trusted_cadir, ca_pemfile=self.config.options.ssl_verify_upstream_trusted_ca, cipher_list=ciphers_server, alpn_protos=alpn, ) tls_cert_err = self.server_conn.ssl_verification_error if tls_cert_err is not None: self.log(str(tls_cert_err), "warn") self.log("Ignoring server verification error, continuing with connection", "warn") except netlib.exceptions.InvalidCertificateException as e: six.reraise( exceptions.InvalidServerCertificate, exceptions.InvalidServerCertificate(str(e)), sys.exc_info()[2] ) except 
netlib.exceptions.TlsException as e: six.reraise( exceptions.TlsProtocolException, exceptions.TlsProtocolException("Cannot establish TLS with {address} (sni: {sni}): {e}".format( address=repr(self.server_conn.address), sni=self.server_sni, e=repr(e), )), sys.exc_info()[2] ) self.log("ALPN selected by server: %s" % self.alpn_for_client_connection, "debug") def _find_cert(self): """ This function determines the Common Name (CN) and Subject Alternative Names (SANs) our certificate should have and then fetches a matching cert from the certstore. """ host = None sans = set() # In normal operation, the server address should always be known at this point. # However, we may just want to establish TLS so that we can send an error message to the client, # in which case the address can be None. if self.server_conn.address: host = self.server_conn.address.host.encode("idna") # Should we incorporate information from the server certificate? use_upstream_cert = ( self.server_conn and self.server_conn.tls_established and (not self.config.options.no_upstream_cert) ) if use_upstream_cert: upstream_cert = self.server_conn.cert sans.update(upstream_cert.altnames) if upstream_cert.cn: sans.add(host) host = upstream_cert.cn.decode("utf8").encode("idna") # Also add SNI values. if self._client_hello.sni: sans.add(self._client_hello.sni.encode("idna")) if self._custom_server_sni: sans.add(self._custom_server_sni.encode("idna")) # RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity. # In other words, the Common Name is irrelevant then. if host: sans.add(host) return self.config.certstore.get_cert(host, list(sans))
{ "content_hash": "ecf8f604476f718962fbd11609522fa6", "timestamp": "", "source": "github", "line_count": 603, "max_line_length": 118, "avg_line_length": 37.41293532338308, "alnum_prop": 0.5966312056737588, "repo_name": "jvillacorta/mitmproxy", "id": "d08e2e329afc1c7c3fa0633a96babdfa45be6e64", "size": "22560", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mitmproxy/protocol/tls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "186239" }, { "name": "HTML", "bytes": "3034" }, { "name": "JavaScript", "bytes": "146506" }, { "name": "PowerShell", "bytes": "362" }, { "name": "Python", "bytes": "1270398" }, { "name": "Shell", "bytes": "3717" } ], "symlink_target": "" }
import tornado.web
import tornado.websocket
import tornado.httpserver
import tornado.ioloop
import serial
import threading
import asyncio
import time

# WebSocket clients currently connected; every completed serial line read by
# the reader thread is broadcast to each entry in this list.
clients = []
serialready = True


class ThreadReadSerial(threading.Thread):
    """Background thread that reads lines from the global serial port.

    Bytes are read one at a time; '\\r' and '>' are treated as noise and
    dropped.  On each '\\n' the accumulated line is broadcast to every
    connected WebSocket client.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()
        self.serialbuff = ""

    def run(self):
        global clients
        global ser
        # Tornado requires an event loop in any thread that calls
        # write_message(); create a fresh one for this worker thread.
        asyncio.set_event_loop(asyncio.new_event_loop())
        while not self.stop_event.is_set():
            try:
                data = ser.read().decode()
                if len(data) != 0:
                    if data != '\n' and data != '\r' and data != '>':
                        self.serialbuff = self.serialbuff + data
                    if data == '\n' and self.serialbuff != "":
                        for c in clients:
                            c.write_message(self.serialbuff)
                        self.serialbuff = ""
            # Bug fix: the SerialException handler must come BEFORE the
            # generic Exception handler.  In the original order it was
            # unreachable dead code, because serial.SerialException is a
            # subclass of Exception.
            except serial.SerialException as es:
                print('error ser ' + str(es))
            except Exception as e:
                print('error1' + str(e))


class ThreadBroadcastHealth(threading.Thread):
    """Background thread that polls the controller for health info.

    Every 2 seconds it sends 'v\\n' over the serial port; the reply is
    picked up by ThreadReadSerial and broadcast to the clients.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()

    def run(self):
        global ser
        while not self.stop_event.is_set():
            try:
                time.sleep(2)
                battinforequest = bytearray('v\n', 'utf-8')
                ser.write(battinforequest)
            except Exception as e:
                print('error health ' + str(e))


class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Bridges WebSocket clients to the serial port."""

    connections = set()

    def check_origin(self, origin):
        # Allow cross-origin connections; the UI may be served from elsewhere.
        return True

    def open(self):
        self.connections.add(self)
        self.set_nodelay(True)
        global clients
        clients.append(self)
        print('new connection was opened')

    def on_message(self, message):
        """Forward a client message to the serial port.

        The special message 'shutdown' is translated to the controller
        command 'p!999'; any other message is sent through verbatim with a
        trailing newline.  After each write the serial buffers are reset.
        """
        global ser
        try:
            # The two original branches only differed in the payload text,
            # so build it once and share the write/reset sequence.
            if message == 'shutdown':
                payload = 'p!999\n'
            else:
                payload = message + '\n'
            ser.write(bytearray(payload, 'utf-8'))
            ser.flushInput()
            ser.reset_input_buffer()
            ser.reset_output_buffer()
        except Exception as e:
            print('error2' + str(e))

    def on_close(self):
        self.connections.remove(self)
        global clients
        clients.remove(self)
        print('connection closed')


class Application(tornado.web.Application):
    """Tornado app: /websocket for the bridge, everything else static."""

    def __init__(self):
        handlers = [
            (r'/websocket', WebSocketHandler),
            (r'/(.*)', tornado.web.StaticFileHandler, {'path': './root'})
        ]
        settings = {
            'template_path': 'templates'
        }
        tornado.web.Application.__init__(self, handlers, **settings)


if __name__ == '__main__':
    ser = serial.Serial('/dev/ttyUSB0', 38400, timeout=1)
    print(ser.name)
    ser.flushInput()
    ser.reset_input_buffer()
    ser.reset_output_buffer()

    ws_app = Application()
    server = tornado.httpserver.HTTPServer(ws_app)
    server.listen(9090)

    serreadthread = ThreadReadSerial()
    serreadthread.daemon = True
    serreadthread.start()

    readthreadhealth = ThreadBroadcastHealth()
    readthreadhealth.daemon = True
    readthreadhealth.start()

    tornado.ioloop.IOLoop.instance().start()
{ "content_hash": "9d5ff7d1f90d6bab9fc4c7b08ff10f1d", "timestamp": "", "source": "github", "line_count": 138, "max_line_length": 66, "avg_line_length": 23.44927536231884, "alnum_prop": 0.6600741656365884, "repo_name": "hackffm/SpaceShuttle", "id": "fd3e886f9e83c88d9495ce6428e93b05ebf01ddb", "size": "3261", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "stacks/undercarriage/src/raspi/python/drive.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "208145" }, { "name": "C++", "bytes": "29417" }, { "name": "CSS", "bytes": "2557" }, { "name": "HTML", "bytes": "91582" }, { "name": "JavaScript", "bytes": "127660" }, { "name": "OpenSCAD", "bytes": "1108422" }, { "name": "Processing", "bytes": "7662" }, { "name": "Python", "bytes": "5994" }, { "name": "Shell", "bytes": "1694" } ], "symlink_target": "" }
"""Dataset for chromosome ID task with baseline-formatted examples."""

import functools
from typing import Optional

from absl import logging
from flax import jax_utils
import jax
import jax.numpy as jnp
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.dataset_lib import datasets
import tensorflow as tf

NUM_TRAIN_EXAMPLES = 368_106
NUM_TEST_EXAMPLES = 40_620
NUM_CLASSES = 24  # Chromosomes 1-22 plus X and Y.
NUM_CHANNELS = 1

# TODO(shamsiz) Filter out abnormal karyograms in (99,49) and (149,69) datasets.
# Maps (height, width) of the chromosome images to the base directory that
# contains the matching tfrecord shards.
DATASET_BASE_DIRS = {
    (99, 49): '',
    (149, 69): '',
    (199, 99): '',
}


@datasets.add_dataset('chrmID_baseline')
def get_dataset(
    *,
    batch_size,
    eval_batch_size,
    num_shards,
    dtype_str='float32',
    shuffle_seed=0,
    rng=None,
    prefetch_buffer_size=2,
    dataset_configs=None,
    dataset_service_address: Optional[str] = None):
  """Returns generators for the chrmID train, validation, and test set.

  Args:
    batch_size: int; Determines the train batch size.
    eval_batch_size: int; Determines the evaluation batch size.
    num_shards: int; Number of shards --> batch shape: [num_shards, bs, ...].
    dtype_str: Data type of the image (e.g. 'float32').
    shuffle_seed: int; Seed for shuffling the training data.
    rng: JAX rng key, which can be used for augmentation, shuffling, etc.
    prefetch_buffer_size: int; Buffer size for the prefetch.
    dataset_configs: dict; Dataset specific configurations.
    dataset_service_address: If set, will distribute the training dataset using
      the given tf.data service at the given address.

  Returns:
    A dataset_utils.Dataset() which includes a train_iter, a valid_iter,
    a test_iter, and a dict of meta_data.

  Raises:
    ValueError: If dataset_configs.chrm_image_shape has no registered base
      directory, or if a dataset service is combined with a shuffle seed.
  """
  del rng
  try:
    dataset_base_dir = DATASET_BASE_DIRS[tuple(
        dataset_configs.chrm_image_shape)]
  except KeyError as key_error:
    raise ValueError('No dataset found matching "%s"; options: %r' %
                     (dataset_configs.chrm_image_shape,
                      DATASET_BASE_DIRS.keys())) from key_error

  def build_baseline_dataset(split='train', shuffle=False):
    """dataset_fn called by data.build_dataset(**kwargs)."""
    # NOTE(review): `shuffle` is never passed by the callers below, so the
    # train split currently also reads with a single parallel reader --
    # confirm whether training was meant to use parallel reads.
    parallel_reads = 4 if shuffle else 1
    ds = load_data(
        dataset_base_dir, split, parallel_reads=parallel_reads)
    ds = ds.map(
        lambda x: preprocess(x, 'label', dataset_configs.chrm_image_shape))
    return ds

  # Use a different seed for each host.
  if shuffle_seed is None:
    local_seed = None
  else:
    # NOTE(review): the value of `shuffle_seed` is ignored here; the data
    # seed is hard-coded to 0 -- confirm this is intentional.
    data_seed = 0
    local_seed = data_seed + jax.process_index()
  train_dataset = build_dataset(
      dataset_fn=build_baseline_dataset,
      batch_size=batch_size,
      seed=local_seed,
      split='train',
      strategy=None)
  if dataset_service_address:
    if shuffle_seed is not None:
      raise ValueError('Using dataset service with a random seed causes each '
                       'worker to produce exactly the same data. Add '
                       'config.shuffle_seed = None to your config if you '
                       'want to run with dataset service.')
    logging.info('Using the tf.data service at %s', dataset_service_address)
    train_dataset = dataset_utils.distribute(train_dataset,
                                             dataset_service_address)

  eval_dataset = build_dataset(
      dataset_fn=build_baseline_dataset,
      split='valid',
      batch_size=eval_batch_size,
      strategy=None)

  test_dataset = build_dataset(
      dataset_fn=build_baseline_dataset,
      split='test',
      batch_size=eval_batch_size,
      strategy=None)

  shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)
  maybe_pad_batches_train = functools.partial(
      dataset_utils.maybe_pad_batch, train=True, batch_size=batch_size)
  maybe_pad_batches_eval = functools.partial(
      dataset_utils.maybe_pad_batch, train=False, batch_size=eval_batch_size)

  train_iter = iter(train_dataset)
  train_iter = map(dataset_utils.tf_to_numpy, train_iter)
  train_iter = map(maybe_pad_batches_train, train_iter)
  train_iter = map(shard_batches, train_iter)
  train_iter = jax_utils.prefetch_to_device(train_iter, prefetch_buffer_size)

  valid_iter = iter(eval_dataset)
  valid_iter = map(dataset_utils.tf_to_numpy, valid_iter)
  valid_iter = map(maybe_pad_batches_eval, valid_iter)
  valid_iter = map(shard_batches, valid_iter)
  valid_iter = jax_utils.prefetch_to_device(valid_iter, prefetch_buffer_size)

  test_iter = iter(test_dataset)
  test_iter = map(dataset_utils.tf_to_numpy, test_iter)
  test_iter = map(maybe_pad_batches_eval, test_iter)
  test_iter = map(shard_batches, test_iter)
  test_iter = jax_utils.prefetch_to_device(test_iter, prefetch_buffer_size)

  input_shape = [-1] + list(dataset_configs.chrm_image_shape) + [NUM_CHANNELS]

  meta_data = {
      'num_classes': NUM_CLASSES,
      'input_shape': input_shape,
      'num_train_examples': NUM_TRAIN_EXAMPLES,
      'num_eval_examples': NUM_TEST_EXAMPLES,
      'input_dtype': getattr(jnp, dtype_str),
      'target_is_onehot': True,
  }
  return dataset_utils.Dataset(train_iter, valid_iter, test_iter, meta_data)


def parse_example(serialized_example):
  """Parses feature dictionary from the `serialized_example` proto.

  Args:
    serialized_example: The proto of the current example.

  Returns:
    A parsed example as dict with several elements.
  """
  feature_description = {
      'chrm_img': tf.io.FixedLenFeature([], tf.string, default_value=''),
      'meta_img': tf.io.FixedLenFeature([], tf.string, default_value=''),
      'label': tf.io.FixedLenFeature([], tf.string, default_value='')
  }
  features = tf.io.parse_single_example(serialized_example,
                                        feature_description)
  return features


def load_data(base_dir, split, parallel_reads=4):
  """Loads the metaphase dataset.

  Args:
    base_dir: Base directory containing dataset as tfrecords.
    split: str; One of 'train', 'valid'/'validation' or 'test'.
    parallel_reads: int; Number of parallel readers (set to 1 for determinism).

  Returns:
    tf.data.Datasets for training, testing and validation.
  """
  num_hosts = jax.process_count()
  host_id = jax.process_index()
  if split == 'train':
    path = base_dir + '[0-7]-00*'
  elif split in ('valid', 'validation'):
    # Bug fix: get_dataset() requests the validation split as 'valid', but
    # this branch previously only matched 'validation', so validation
    # silently fell through to the test shards ('9-00*').
    path = base_dir + '8-00*'
  else:
    path = base_dir + '9-00*'

  # We shard the data between different hosts and create a Dataset that
  # includes only 1/num_shards of the full dataset.
  filenames = tf.io.matching_files(path)
  filenames_host_split = np.array_split(filenames, num_hosts)[host_id]
  files = tf.data.Dataset.list_files(filenames_host_split)
  data = files.interleave(
      tf.data.TFRecordDataset,
      cycle_length=1 if split != 'train' else parallel_reads,
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  data = data.map(
      parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return data


def build_dataset(dataset_fn,
                  batch_size=None,
                  shuffle_buffer_size=256,
                  seed=None,
                  strategy=None,
                  **dataset_kwargs):
  """Dataset builder that takes care of strategy, batching and shuffling.

  Args:
    dataset_fn: function; A function that loads the dataset.
    batch_size: int; Size of the batch.
    shuffle_buffer_size: int; Size of the buffer for used for shuffling.
    seed: int; Random seed used for shuffling.
    strategy: TF strategy that handles the distribution policy.
    **dataset_kwargs: dict; Arguments passed to TFDS.

  Returns:
    Dataset.
  """

  def _dataset_fn(input_context=None):
    """Dataset function."""
    replica_batch_size = batch_size
    if input_context:
      replica_batch_size = input_context.get_per_replica_batch_size(batch_size)
    ds = dataset_fn(**dataset_kwargs)
    split = dataset_kwargs.get('split')
    if split == 'train':
      # Train: repeat first, then shuffle, then batch.
      ds = ds.repeat()
      local_seed = seed  # Seed for this machine.
      if local_seed is not None and input_context:
        local_seed += input_context.input_pipeline_id
      ds = ds.shuffle(shuffle_buffer_size, seed=local_seed)
      ds = ds.batch(replica_batch_size, drop_remainder=True)
    else:
      # Test and validation: batch first, then repeat.
      ds = ds.batch(replica_batch_size, drop_remainder=False)
      ds = ds.repeat()
    options = tf.data.Options()
    options.experimental_optimization.parallel_batch = True
    ds = ds.with_options(options)
    return ds.prefetch(tf.data.experimental.AUTOTUNE)

  if strategy:
    ds = strategy.experimental_distribute_datasets_from_function(_dataset_fn)
  else:
    ds = _dataset_fn()
  return ds


def preprocess(features, label_key, chrm_image_shape):
  """Preprocessing code specific to metaphase images."""
  if isinstance(label_key, str):
    labels = features[label_key]
  else:
    labels = tuple(features[k] for k in label_key)

  class_names = tf.convert_to_tensor([
      b'chrm_%d' % i for i in range(1, 23)] + [b'chrm_X', b'chrm_Y'])
  chrm = tf.reshape(
      tf.io.decode_raw(features['chrm_img'], tf.float32),
      tuple(chrm_image_shape) + (1,))
  labels = labels == class_names  # Creates one-hot label.
  return {
      'inputs': chrm,
      'label': labels
  }
{ "content_hash": "2c18fbbb4cb82903248450e109149a91", "timestamp": "", "source": "github", "line_count": 281, "max_line_length": 80, "avg_line_length": 33.195729537366546, "alnum_prop": 0.6677744425385935, "repo_name": "google-research/scenic", "id": "a3977510594dc3a26c8d97ee1b951414e19f9c24", "size": "9328", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "scenic/projects/tasseo/datasets/chrmID_baseline_dataset.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "1717873" }, { "name": "Python", "bytes": "3692184" } ], "symlink_target": "" }
import string

a = """
Daar zijn we weer, met nog een puzzel!
Af en toe willen wij als Puzzelmakers ook iets zeggen.
We gaan ieder jaar weer hard voor de puzzels, en ook dit jaar maken we het weer bont...
Wat top dat dat ieder jaar ook weer kan en mag.
Het systematisch werk gaat ook echter weer door.
Afijn, onnozel geneuzel terzijde...
Wat een duister en vreemd jaar was 2020.
Met virale gevaren, vergane vakantieplannen...
En uitgestelde evenementen.
Maar wij laten de pret niet drukken!
Hierom dus, emblematisch van het hele verhaal...
Een puzzel in fraaie, niet te negeren woorden.
Wat? Jij dacht: “Mooi niet, de puzzel is nog niet begonnen”?
Dat heb jij mis!
Aanschouw de puzzel: je zit er middenin!
In dit mooie tekstgeweld zit een raadsel verscholen.
Want dit jaar was al cryptisch...
Maar nu is het nog ongelofelijker!
Eén op één, ook twee op twee...
Houdt dit gedoe u in gedwee!
Dus, om samen te vatten: schroom niet!
Negeer interpunctie, ook qua spaties.
En vertel, o lezer, aan ons uw super-oplossing!
Welke wereldberoemdheid is verscholen in deze puzzel?
Hint: het aantal regels is ook noemenswaardig!
""".strip()

# Normalise: lowercase, then keep only ascii letters and the newlines that
# preserve the poem's line structure (punctuation, spaces and accented
# characters are dropped).
a = a.lower()
a = ''.join([letter for letter in a if letter in f'{string.ascii_lowercase}\n'])

# Hoist the line split out of the loop; the original re-split `a` on every
# iteration.
lines = a.split('\n')

for i in range(100):
    # Print, for each offset i, the (i+1)-th character from the END of every
    # line.  Lines shorter than i+1 characters are skipped.  Bug fix: the
    # original indexed line[-i-1] unconditionally and crashed with
    # IndexError once i reached the length of the shortest line.
    print(i, ''.join(line[-i - 1] for line in lines if len(line) > i))
{ "content_hash": "e2984b1f3214915916e8c784211b3d2f", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 135, "avg_line_length": 39.94285714285714, "alnum_prop": 0.7367668097281831, "repo_name": "physicalattraction/kerstpuzzel", "id": "8524c23442e10607f4cb9869d575d923b68d5eac", "size": "1405", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/DEC2020/boodschap.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "201993" }, { "name": "XSLT", "bytes": "1442" } ], "symlink_target": "" }
import os

from django.core.management.base import BaseCommand
from tweepy import Client


class Command(BaseCommand):
    """Fetch the authenticated user's user_id via Twitter GET /2/users/me
    and print it to the console.

    https://docs.tweepy.org/en/stable/client.html#tweepy.Client.get_me

    Note: store the retrieved user_id in .env or an environment variable.
    """

    def handle(self, *args, **options):
        # Collect the four OAuth 1.0a credentials from the environment;
        # a missing variable raises KeyError, as before.
        credentials = {
            name: os.environ['TWITTER_' + name.upper()]
            for name in (
                'consumer_key',
                'consumer_secret',
                'access_token',
                'access_token_secret',
            )
        }
        twitter = Client(**credentials)
        response = twitter.get_me()
        print(response)
        # => Response(data=<User id=*** name=thinkAmi username=thinkAmi>, includes={}, errors=[], meta={})
{ "content_hash": "f6c513775a4970cfdc1e869a6c841d0f", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 106, "avg_line_length": 35.391304347826086, "alnum_prop": 0.6437346437346437, "repo_name": "thinkAmi/dj_ringo_tabetter", "id": "a9c48c13b7dfdeabf22ce076e97694b60af4393a", "size": "894", "binary": false, "copies": "1", "ref": "refs/heads/development", "path": "apps/tweets/management/commands/get_user_id.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "1263" }, { "name": "JavaScript", "bytes": "2450" }, { "name": "Jinja", "bytes": "1633" }, { "name": "Python", "bytes": "37288" }, { "name": "Shell", "bytes": "1291" } ], "symlink_target": "" }
import os
import shutil
import tempfile
import zipfile
from logging import FileHandler

from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TransactionTestCase
from django.test.utils import override_settings

from .. import update
from ..models import Artist, Album, Song, CustomStorage

# Scratch directories shared by the whole test run; each test recreates
# and removes them in setUp/tearDown.
TEST_MEDIA_DIR = tempfile.mkdtemp()
TEST_DROPBOX_DIR = tempfile.mkdtemp()
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), 'files')


@override_settings(MEDIA_ROOT=TEST_MEDIA_DIR, DROPBOX=TEST_DROPBOX_DIR)
class ViewTest(TransactionTestCase):
    """Integration tests for importing audio files and the download views.

    Uses temporary media/dropbox directories and a private log file so that
    assertions can inspect what ``update`` logged.
    """

    reset_sequences = True

    def setUp(self):
        """Create scratch dirs, capture update logging, swap field storage."""
        self.media_dir = TEST_MEDIA_DIR
        self.dropbox = TEST_DROPBOX_DIR
        if not os.path.exists(TEST_MEDIA_DIR):
            os.mkdir(TEST_MEDIA_DIR)
        if not os.path.exists(TEST_DROPBOX_DIR):
            os.mkdir(TEST_DROPBOX_DIR)

        # use a temporary log file so tests can assert on what was logged
        self.logfile = tempfile.NamedTemporaryFile(delete=False)
        default_handler = update.LOGGER.handlers[0]
        update.LOGGER.removeHandler(default_handler)
        update.LOGGER.addHandler(FileHandler(self.logfile.name))

        self.mutagen_opts = update.get_mutagen_audio_options()

        # Swap CustomStorage for filefield for testing so that it uses
        # the temporary media folder instead of settings.MEDIA_ROOT
        self._field = Song._meta.get_field_by_name('filefield')[0]
        self._default_storage = self._field.storage
        test_storage = CustomStorage(location=self.media_dir)
        self._field.storage = test_storage

    def tearDown(self):
        """Remove scratch files and restore the field's original storage."""
        os.remove(self.logfile.name)
        shutil.rmtree(self.media_dir)
        shutil.rmtree(self.dropbox)
        # Restore the storage *attribute* on the field.  The previous code
        # rebound the local reference (self._field = self._default_storage),
        # which left the temporary test storage installed on the model field
        # after the test finished.
        self._field.storage = self._default_storage

    def assertNoLogError(self):
        """Asserts that nothing was written to the log file."""
        self.assertEqual(os.path.getsize(self.logfile.name), 0)

    def test_get_song_info_mp3(self):
        filename = os.path.join(TEST_FILES_DIR, 'testfile.mp3')
        info = update.get_song_info(filename, self.mutagen_opts)
        self.assertEqual(info.artist, 'The Artist')
        self.assertEqual(info.album, 'The Album')
        self.assertEqual(info.title, 'The Song')
        self.assertEqual(info.track, '01')
        self.assertEqual(info.bitrate, 320000)
        self.assertNoLogError()

    def test_get_song_info_mp4(self):
        filename = os.path.join(TEST_FILES_DIR, 'testfile.m4a')
        info = update.get_song_info(filename, self.mutagen_opts)
        self.assertEqual(info.artist, 'The Artist')
        self.assertEqual(info.album, 'The Album')
        self.assertEqual(info.title, 'The Song')
        self.assertEqual(info.track, '01')
        self.assertEqual(info.bitrate, 128000)
        self.assertNoLogError()

    def test_get_song_info_ogg(self):
        filename = os.path.join(TEST_FILES_DIR, 'testfile.ogg')
        info = update.get_song_info(filename, self.mutagen_opts)
        self.assertEqual(info.artist, 'The Artist')
        self.assertEqual(info.album, 'The Album')
        self.assertEqual(info.title, 'The Song')
        self.assertEqual(info.track, '01')
        self.assertEqual(info.bitrate, 160000)
        self.assertNoLogError()

    def test_get_song_info_flac(self):
        filename = os.path.join(TEST_FILES_DIR, 'testfile.flac')
        info = update.get_song_info(filename, self.mutagen_opts)
        self.assertEqual(info.artist, 'The Artist')
        self.assertEqual(info.album, 'The Album')
        self.assertEqual(info.title, 'The Song')
        self.assertEqual(info.track, '01')
        # FLAC files report no bitrate through mutagen.
        self.assertEqual(info.bitrate, 0)
        self.assertNoLogError()

    def test_get_song_info_wma(self):
        filename = os.path.join(TEST_FILES_DIR, 'testfile.wma')
        # WMA goes through a dedicated reader instead of the generic one.
        info = update.get_wma_info(filename)
        self.assertEqual(info.artist, 'The Artist')
        self.assertEqual(info.album, 'The Album')
        self.assertEqual(info.title, 'The Song')
        self.assertEqual(info.track, '01')
        self.assertEqual(info.bitrate, 198000)
        self.assertNoLogError()

    def test_get_song_info_unicode_file(self):
        # Both the file name and its tags contain non-ascii characters.
        filename = os.path.join(TEST_FILES_DIR, u'ȕñĭçợƌɇ⸞ƒıℓⱻ.ogg')
        info = update.get_song_info(filename, self.mutagen_opts)
        self.assertEqual(info.artist, u'ṫħĕ Ⓐ řⱦⅈṩȶ')
        self.assertEqual(info.album, 'The Album')
        self.assertEqual(info.title, 'The Song')
        self.assertEqual(info.track, '01')
        self.assertEqual(info.bitrate, 160000)
        self.assertNoLogError()

    def test_import_file(self):
        # put test file in dropbox
        shutil.copy(os.path.join(TEST_FILES_DIR, 'testfile.ogg'), self.dropbox)
        filename = os.path.join(self.dropbox, 'testfile.ogg')

        self.assertFalse(Artist.objects.exists())
        self.assertFalse(Album.objects.exists())
        self.assertFalse(Song.objects.exists())
        self.assertTrue(os.path.exists(filename))
        with open(filename, 'rb') as f:
            original_content = f.read()

        # import file
        update.import_file(filename, self.mutagen_opts)

        song = Song.objects.get(title='The Song')
        self.assertEqual(song.title, 'The Song')
        self.assertEqual(song.album.title, 'The Album')
        self.assertEqual(song.album.artist.name, 'The Artist')
        self.assertEqual(song.track, '01')
        self.assertEqual(song.bitrate, 160000)
        self.assertEqual(song.filetype, 'ogg')
        self.assertEqual(song.original_path, 'testfile.ogg')
        self.assertEqual(song.filefield.read(), original_content)
        # a successful import consumes the dropbox file
        self.assertFalse(os.path.exists(filename))
        self.assertNoLogError()

    def test_import_existing_file_with_no_better_bitrate_skips_it(self):
        # put test files in dropbox
        shutil.copy(os.path.join(TEST_FILES_DIR, 'testfile.ogg'), self.dropbox)
        shutil.copy(
            os.path.join(TEST_FILES_DIR, 'testfile.ogg'),
            os.path.join(self.dropbox, 'testfile_copy.ogg'))

        # import files
        filename = os.path.join(self.dropbox, 'testfile.ogg')
        update.import_file(filename, self.mutagen_opts)
        filename = os.path.join(self.dropbox, 'testfile_copy.ogg')
        update.import_file(filename, self.mutagen_opts)

        # the duplicate is reported and left in the dropbox
        with open(self.logfile.name, 'r') as f:
            log_record = f.read()
        self.assertIn('Problem importing file', log_record)
        self.assertIn('The Song by The Artist already exists', log_record)
        self.assertTrue(os.path.exists(filename))

    def test_import_existing_file_with_better_bitrate_replaces_it(self):
        # put test files in dropbox
        shutil.copy(
            os.path.join(TEST_FILES_DIR, 'testfile-128k.mp3'), self.dropbox)
        shutil.copy(os.path.join(TEST_FILES_DIR, 'testfile.mp3'), self.dropbox)

        # import the low-bitrate file first, then the high-bitrate one
        filename = os.path.join(self.dropbox, 'testfile-128k.mp3')
        update.import_file(filename, self.mutagen_opts)
        song = Song.objects.get(title='The Song')
        self.assertEqual(song.bitrate, 128000)

        filename = os.path.join(self.dropbox, 'testfile.mp3')
        update.import_file(filename, self.mutagen_opts)
        # still a single Song, now with the better bitrate
        self.assertEqual(Song.objects.count(), 1)
        song = Song.objects.get(title='The Song')
        self.assertEqual(song.bitrate, 320000)
        self.assertNoLogError()

    def test_importing_unsupported_format_gives_an_error(self):
        # put file in dropbox and import it
        shutil.copy(os.path.join(TEST_FILES_DIR, 'testfile.wav'), self.dropbox)
        filename = os.path.join(self.dropbox, 'testfile.wav')
        update.import_file(filename, self.mutagen_opts)

        with open(self.logfile.name, 'r') as f:
            log_record = f.read()
        self.assertIn('Problem importing file', log_record)
        self.assertIn('testfile.wav (Mutagen could not read', log_record)
        self.assertTrue(os.path.exists(filename))

    def test_importing_dummy_file_removes_it_from_dropbox(self):
        # create the dummy file
        filename = os.path.join(self.dropbox, '.DS_Store')
        with open(filename, 'w'):
            pass
        self.assertTrue(os.path.exists(filename))

        update.update()
        # junk files are silently discarded, not logged as errors
        self.assertFalse(os.path.exists(filename))
        self.assertNoLogError()

    # TODO: check if user is authenticated
    def test_upload_dropbox_files_to_library(self):
        # Put some files in dropbox
        zipped_dropbox = os.path.join(TEST_FILES_DIR, 'test_dropbox.zip')
        with zipfile.ZipFile(zipped_dropbox, 'r') as f:
            f.extractall(self.dropbox)
        self.assertItemsEqual(
            os.listdir(self.dropbox), ['song1.ogg', 'The Artist', 'Unknown'])
        self.assertItemsEqual(
            os.listdir(os.path.join(self.dropbox, 'The Artist')),
            ['Album', 'song2.ogg', 'song3.ogg'])
        self.assertItemsEqual(
            os.listdir(os.path.join(self.dropbox, 'Unknown')),
            ['song5.ogg', 'song6.ogg'])

        # make request to upload files to library
        response = self.client.get(reverse('update_library'))

        # check that a message has been set in the cookie
        self.assertTrue('messages' in response.cookies.keys())
        self.assertIn(
            'Library successfully updated',
            response.cookies.get('messages').value)

        # Check that the files have been imported
        self.assertNoLogError()
        self.assertEqual(os.listdir(self.dropbox), [])
        self.assertEqual(Artist.objects.count(), 2)
        self.assertEqual(Album.objects.count(), 4)
        self.assertEqual(Song.objects.count(), 6)
        # media files are filed under the first letter of the artist name
        self.assertItemsEqual(os.listdir(self.media_dir), ['L', 'T'])
        filename = os.path.join(
            self.media_dir, 'T', 'The Artist', 'The Album',
            '04 - The Fourth Song.ogg')
        self.assertTrue(os.path.exists(filename))

    def test_download_artist(self):
        # Upload some files
        zipped_dropbox = os.path.join(TEST_FILES_DIR, 'test_dropbox.zip')
        with zipfile.ZipFile(zipped_dropbox, 'r') as f:
            f.extractall(self.dropbox)
        update.update()

        # make request
        response = self.client.get(reverse('download_artist', args=[1]))

        # check response have right headers
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/zip')
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename=The%20Artist.zip')

        # check structure of returned zip file
        songs = Song.objects.filter(album__artist=1)
        # drop the leading "X/" shelf directory from the stored name
        original_song_names = [s.filefield.name[2:] for s in songs]
        content = ContentFile(response.content)
        self.assertTrue(zipfile.is_zipfile(content))
        with zipfile.ZipFile(content, 'r') as z:
            self.assertIsNone(z.testzip())
            self.assertItemsEqual(z.namelist(), original_song_names)

    def test_download_artist_with_no_songs_redirects_to_detail_view(self):
        # Create dummy artist
        a = Artist.objects.create(name='The Artist')

        # make request
        response = self.client.get(reverse('download_artist', args=[a.pk]))

        # check response have right headers
        self.assertEqual(response.status_code, 302)
        redirect_url = response.get('Location', '')
        self.assertEqual(
            redirect_url,
            'http://testserver' + reverse('artist_detail', args=[a.pk]))

        # check that a message has been set in the cookie
        self.assertTrue('messages' in response.cookies.keys())
        self.assertIn(
            'The artist does not have any song',
            response.cookies.get('messages').value)

    def test_download_updated_artist_retrieve_the_correct_file_structure(self):
        # TODO: not implemented yet
        pass

    def test_fetching_url_of_nonexisting_instance_redirects_to_list_view(self):
        response = self.client.get(reverse('artist_detail', args=[1]))
        self.assertEqual(response.status_code, 302)
        redirect_url = response.get('Location', '')
        self.assertEqual(
            redirect_url, 'http://testserver' + reverse('artist_list'))

    def test_fetching_other_nonexisting_url_returns_404(self):
        response = self.client.get('/spam/and/eggs')
        self.assertEqual(response.status_code, 404)
{ "content_hash": "bd5a1a34a0c4b699d5a6d1fadca46607", "timestamp": "", "source": "github", "line_count": 321, "max_line_length": 79, "avg_line_length": 38.797507788161994, "alnum_prop": 0.6440501043841336, "repo_name": "rafikdraoui/vortex", "id": "1f49b0b480ba41427379b1e7aca328c64587507d", "size": "12509", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "library/tests/test_views.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "2989" }, { "name": "Python", "bytes": "87711" } ], "symlink_target": "" }
from django.conf.urls import url

from src.apps.users.api import views

# URL routes for the users API.
urlpatterns = [
    # GET /getList/ -> list of users.
    url(r'^getList/$', views.GetUserList.as_view(), name='list'),
]
{ "content_hash": "76162e09f97e816e7f3f7ceb2a6c9602", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 41, "avg_line_length": 15.307692307692308, "alnum_prop": 0.592964824120603, "repo_name": "airportmarc/the416life", "id": "86546745603c69af91560110ff86e458f7e5ebc6", "size": "199", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/apps/users/api/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "18" }, { "name": "CSS", "bytes": "430385" }, { "name": "HTML", "bytes": "174632" }, { "name": "JavaScript", "bytes": "224762" }, { "name": "Python", "bytes": "477212" }, { "name": "Shell", "bytes": "4240" }, { "name": "Vue", "bytes": "80363" } ], "symlink_target": "" }
import atexit
import signal

from . import configs, exceptions, formats, pa, streams, __version__

# The package's public API is the union of the submodules' public names.
__all__ = \
    configs.__all__ \
    + formats.__all__ \
    + exceptions.__all__ \
    + pa.__all__ \
    + streams.__all__ \
    + __version__.__all__

from .configs import *
from .exceptions import *
from .formats import *
from .pa import *
from .streams import *
from .__version__ import *

# Initialise the underlying PortAudio library when the package is imported,
# and make sure it is torn down again when the interpreter exits normally.
pa.initialize()
atexit.register(pa.terminate)

# Also terminate PortAudio on common termination signals.
# NOTE(review): signal handlers are invoked as handler(signum, frame); this
# assumes pa.terminate tolerates (or ignores) those two arguments — confirm.
# NOTE(review): installing these handlers replaces the default ones, so the
# process will no longer die on SIGINT/SIGTERM unless pa.terminate exits —
# verify that this is the intended behaviour.
for sig in (signal.SIGTERM, signal.SIGINT, signal.SIGABRT):
    signal.signal(sig, pa.terminate)
{ "content_hash": "c4ab2998c2b5fdd0cdefaddf612b3025", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 68, "avg_line_length": 18.724137931034484, "alnum_prop": 0.6335174953959485, "repo_name": "elijahr/portaudio-ctypes", "id": "16d9b0e96f0f3fe570b25938e78031ac6a10661b", "size": "543", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/portaudio/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "44169" }, { "name": "Python", "bytes": "53035" } ], "symlink_target": "" }
import sys

import numpy

# First stdin line holds the dimensions and is skipped; every remaining line
# is a whitespace-separated row of integers.
rows = sys.stdin.read().splitlines()[1:]
matrix = numpy.array([numpy.array(row.split(), int) for row in rows])

print(numpy.transpose(matrix))
print(matrix.flatten())
{ "content_hash": "85c1ff6dcaa7687642b50d5465a5dedb", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 81, "avg_line_length": 23, "alnum_prop": 0.7204968944099379, "repo_name": "alexander-matsievsky/HackerRank", "id": "99a4963421a18e0c1a5fdfe2dde50933052220d2", "size": "161", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "All_Domains/Python/Numpy/np-transpose-and-flatten.py", "mode": "33188", "license": "mit", "language": [ { "name": "Haskell", "bytes": "26132" }, { "name": "JavaScript", "bytes": "3906" }, { "name": "Python", "bytes": "28757" } ], "symlink_target": "" }
""" Example Airflow DAG for Google Cloud Storage GCSTimeSpanFileTransformOperator operator. """ from __future__ import annotations import os from datetime import datetime from pathlib import Path from airflow import models from airflow.models.baseoperator import chain from airflow.providers.google.cloud.operators.gcs import ( GCSCreateBucketOperator, GCSDeleteBucketOperator, GCSTimeSpanFileTransformOperator, ) from airflow.providers.google.cloud.transfers.local_to_gcs import LocalFilesystemToGCSOperator from airflow.utils.trigger_rule import TriggerRule ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID") PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT") DAG_ID = "gcs_transform_timespan" BUCKET_NAME_SRC = f"bucket_{DAG_ID}_{ENV_ID}" BUCKET_NAME_DST = f"bucket_dst_{DAG_ID}_{ENV_ID}" SOURCE_GCP_CONN_ID = DESTINATION_GCP_CONN_ID = "google_cloud_default" FILE_NAME = "example_upload.txt" SOURCE_PREFIX = "timespan_source" DESTINATION_PREFIX = "timespan_destination" UPLOAD_FILE_PATH = str(Path(__file__).parent / "resources" / FILE_NAME) TRANSFORM_SCRIPT_PATH = str(Path(__file__).parent / "resources" / "transform_timespan.py") with models.DAG( DAG_ID, schedule='@once', start_date=datetime(2021, 1, 1), catchup=False, tags=["gcs", "example"], ) as dag: create_bucket_src = GCSCreateBucketOperator( task_id="create_bucket_src", bucket_name=BUCKET_NAME_SRC, project_id=PROJECT_ID, ) create_bucket_dst = GCSCreateBucketOperator( task_id="create_bucket_dst", bucket_name=BUCKET_NAME_DST, project_id=PROJECT_ID, ) upload_file = LocalFilesystemToGCSOperator( task_id="upload_file", src=UPLOAD_FILE_PATH, dst=f"{SOURCE_PREFIX}/{FILE_NAME}", bucket=BUCKET_NAME_SRC, ) # [START howto_operator_gcs_timespan_file_transform_operator_Task] gcs_timespan_transform_files_task = GCSTimeSpanFileTransformOperator( task_id="gcs_timespan_transform_files", source_bucket=BUCKET_NAME_SRC, source_prefix=SOURCE_PREFIX, source_gcp_conn_id=SOURCE_GCP_CONN_ID, destination_bucket=BUCKET_NAME_DST, 
destination_prefix=DESTINATION_PREFIX, destination_gcp_conn_id=DESTINATION_GCP_CONN_ID, transform_script=["python", TRANSFORM_SCRIPT_PATH], ) # [END howto_operator_gcs_timespan_file_transform_operator_Task] delete_bucket_src = GCSDeleteBucketOperator( task_id="delete_bucket_src", bucket_name=BUCKET_NAME_SRC, trigger_rule=TriggerRule.ALL_DONE ) delete_bucket_dst = GCSDeleteBucketOperator( task_id="delete_bucket_dst", bucket_name=BUCKET_NAME_DST, trigger_rule=TriggerRule.ALL_DONE ) chain( # TEST SETUP [create_bucket_src, create_bucket_dst], upload_file, # TEST BODY gcs_timespan_transform_files_task, # TEST TEARDOWN [delete_bucket_src, delete_bucket_dst], ) from tests.system.utils.watcher import watcher # This test needs watcher in order to properly mark success/failure # when "tearDown" task with trigger rule is part of the DAG list(dag.tasks) >> watcher() from tests.system.utils import get_test_run # noqa: E402 # Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest) test_run = get_test_run(dag)
{ "content_hash": "f34c2201959cd96a45245701822d516c", "timestamp": "", "source": "github", "line_count": 104, "max_line_length": 99, "avg_line_length": 32.43269230769231, "alnum_prop": 0.69878446486807, "repo_name": "cfei18/incubator-airflow", "id": "bdc7fdd3b67b62f32eb2f17ad5d5d635b7831af3", "size": "4160", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/system/providers/google/cloud/gcs/example_gcs_transform_timespan.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "25980" }, { "name": "Dockerfile", "bytes": "72003" }, { "name": "HCL", "bytes": "3786" }, { "name": "HTML", "bytes": "173434" }, { "name": "JavaScript", "bytes": "143068" }, { "name": "Jinja", "bytes": "38808" }, { "name": "Jupyter Notebook", "bytes": "5482" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "22660683" }, { "name": "R", "bytes": "313" }, { "name": "Shell", "bytes": "312715" }, { "name": "TypeScript", "bytes": "472379" } ], "symlink_target": "" }
from concurrent.futures import CancelledError
from datetime import datetime
from functools import lru_cache
from itertools import count
import logging

from bndl.net.connection import NotConnected
from bndl.util.lifecycle import Lifecycle


logger = logging.getLogger(__name__)


class Job(Lifecycle):
    '''
    A set of :class:`Tasks <Task>` which can be executed on a cluster of workers /
    executors.
    '''

    _job_ids = count(1)

    def __init__(self, ctx, tasks, name=None, desc=None):
        super().__init__(name, desc)
        self.id = next(self._job_ids)
        self.ctx = ctx
        self.tasks = tasks

    def cancel(self):
        '''Cancel all of the job's tasks and then the job itself.'''
        for task in self.tasks:
            task.cancel()
        super().cancel()

    # NOTE(review): lru_cache on an instance method keeps the Job alive for
    # the cache's lifetime; presumably acceptable because jobs are released
    # explicitly — confirm.
    @lru_cache()
    def group(self, name):
        '''Return the (cached) list of tasks belonging to the given group.'''
        return [t for t in self.tasks if t.group == name]


class Task(Lifecycle):
    '''
    Execution of a Task on a executor is the basic unit of scheduling in
    ``bndl.compute``.
    '''

    def __init__(self, ctx, task_id, *, priority=None, name=None, desc=None,
                 group=None, max_attempts=None):
        super().__init__(name or 'task ' + str(task_id),
                         desc or 'unknown task ' + str(task_id))
        self.ctx = ctx
        self.id = task_id
        self.group = group
        self.max_attempts = max_attempts or ctx.conf['bndl.compute.attempts']
        # by default tasks are prioritized by their id
        self.priority = task_id if priority is None else priority

        # scheduling state
        self.dependencies = set()
        self.dependents = set()
        self.blocked = set()

        # execution history / outcome
        self.executed_on = []
        self.result_on = []
        self.result = None
        self.exception = None

    def execute(self, scheduler, executor):
        '''
        Execute the task on a executor. The scheduler is provided as 'context' for the
        task.

        Returns a Future for the task's result.
        '''
        # Bug fix: was ``raise NotImplemented()`` which raises
        # "TypeError: 'NotImplementedType' object is not callable" instead of
        # signalling an abstract method.
        raise NotImplementedError()

    def locality(self, executors):
        '''
        Indicate locality for executing this task on executors.

        Args:
            executors (sequence[executor]): The executors to determine the locality
                for.

        Returns:
            A sequence of executor - locality tuples. 0 is indifferent and can be
            skipped, -1 is forbidden, 1+ increasing locality.
        '''
        return ()

    @property
    def succeeded(self):
        '''Whether the task stopped without an exception.'''
        return self.stopped and not self.exception

    @property
    def failed(self):
        '''Whether the task stopped with an exception.'''
        return self.stopped and self.exception is not None

    @property
    def attempts(self):
        '''Number of times the task has been (re)started.'''
        return len(self.executed_on)

    def set_executing(self, executor):
        '''Utility for sub-classes to register the task as executing on a executor.'''
        if self.cancelled:
            raise CancelledError()
        assert not self.running, '%r running' % self
        self.result = None
        self.exception = None
        self.cancelled = False
        self.executed_on.append(executor.name)
        self.result_on.append(executor.name)
        self.signal_start()

    def mark_done(self, result=None):
        '''
        Externally mark the task as done. E.g. because its 'side effect' (result)
        is already available).
        '''
        self.exception = None
        self.result = result
        if not self.stopped:
            if not self.stopped_on:
                self.stopped_on = datetime.now()
            if not self.started_on:
                self.started_on = self.stopped_on
            self.signal_stop()

    def mark_failed(self, exc):
        '''
        'Externally' mark the task as failed. E.g. because the executor which holds
        the tasks' result has failed / can't be reached.
        '''
        if not self.stopped_on:
            self.stopped_on = datetime.now()
        self.result = None
        self.exception = exc
        self.signal_stop()

    def last_executed_on(self):
        '''The name of the executor this task executed on last (if any).'''
        try:
            return self.executed_on[-1]
        except IndexError:
            return None

    def last_result_on(self):
        '''The name of the node which has the last result (if any).'''
        try:
            return self.result_on[-1]
        except IndexError:
            return None

    def reset(self):
        '''Clear the task's outcome so it can be attempted again.'''
        self.result = None
        self.exception = None

    def release(self):
        '''Release most resources of the task. Invoked after a job's execution
        is complete.'''
        self.result = None
        self.dependencies = set()
        self.dependents = set()
        self.blocked = set()
        self.started_listeners.clear()
        self.stopped_listeners.clear()

    def __repr__(self):
        if self.failed:
            state = ' failed'
        elif self.succeeded:
            state = ' succeeded'
        elif self.running:
            state = ' running'
        else:
            state = ''
        return '<%s %s%s>' % (self.__class__.__name__, self.id_str, state)

    @property
    def id_str(self):
        '''Dotted string form of the (possibly tuple-valued) task id.'''
        return '.'.join(map(str, self.id)) if isinstance(self.id, tuple) else self.id


class RmiTask(Task):
    '''
    A task which performs a Remote Method Invocation to execute a method with
    positional and keyword arguments.
    '''

    def __init__(self, ctx, task_id, method, args=(), kwargs=None, *,
                 priority=None, name=None, desc=None, group=None):
        super().__init__(ctx, task_id, priority=priority, name=name, desc=desc,
                         group=group)
        self.method = method
        self.args = args
        self.kwargs = kwargs or {}
        # handle to the remotely executing invocation (None when not running)
        self.handle = None

    def execute(self, scheduler, executor):
        assert self.args is not None and self.kwargs is not None
        assert not self.succeeded, '%r already running?' % (self)
        self.set_executing(executor)
        schedule_future = executor.service('tasks').execute_async(
            self.method, *self.args, **self.kwargs)
        schedule_future.executor = executor
        schedule_future.add_done_callback(self._task_scheduled)

    def _last_executor(self):
        '''The peer the task last executed on, if still connected.'''
        if self.executed_on:
            return self.ctx.node.peers.get(self.executed_on[-1])

    def _task_scheduled(self, schedule_future):
        '''Callback once the remote side accepted the invocation.'''
        try:
            handle = schedule_future.result()
        except Exception as exc:
            self.mark_failed(exc)
        else:
            if self.cancelled:
                # cancelled while being scheduled; cancel remotely as well
                self._cancel(handle)
            else:
                self.handle = handle
                try:
                    schedule_future.executor.service('tasks') \
                                   .get_task_result(self.handle) \
                                   .add_done_callback(self._task_completed)
                except NotConnected as exc:
                    self.mark_failed(exc)

    def _task_completed(self, future):
        '''Callback once the remote invocation produced a result (or raised).'''
        try:
            self.handle = None
            self.result = future.result()
        except Exception as exc:
            self.exception = exc
        finally:
            self.signal_stop()

    def cancel(self):
        super().cancel()
        if self.handle:
            logger.debug('Canceling %s', self)
            self._cancel(self.handle)
            self.handle = None

    def _cancel(self, handle):
        return self._last_executor().service('tasks').cancel_task(handle)

    def mark_failed(self, exc):
        # drop the stale remote handle before recording the failure
        self.handle = None
        super().mark_failed(exc)

    def release(self):
        super().release()
        # keep only the method's name for debugging; the callable is dropped
        self.method = self.method.__name__
        self.handle = None
        self.args = None
        self.kwargs = None
        # NOTE(review): this shadows the ``locality`` method on the instance;
        # presumably intentional to drop a per-instance override — confirm.
        self.locality = None
{ "content_hash": "7face2d369d544475237beb0ca1f96ba", "timestamp": "", "source": "github", "line_count": 268, "max_line_length": 119, "avg_line_length": 28.182835820895523, "alnum_prop": 0.5754005031113465, "repo_name": "bndl/bndl", "id": "fa23e19afc836c0f519fda8ac0cbb891d834e942", "size": "8098", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bndl/compute/job.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Cython", "bytes": "20307" }, { "name": "HTML", "bytes": "13357" }, { "name": "Makefile", "bytes": "669" }, { "name": "Python", "bytes": "549892" } ], "symlink_target": "" }
import data, pygame, math

from pymunk.vec2d import Vec2d


class Texture(object):
    """Wrapper around a pygame surface caching its size and center point."""

    def __init__(self, image):
        self.image = image
        self.width = image.get_width()
        self.height = image.get_height()
        self.center = (self.width / 2, self.height / 2)


def wrld_to_img(verts, min_v):
    """Translate world-space vertices to image coordinates (y axis flipped)."""
    origin_x, origin_y = min_v
    converted = []
    for x, y in verts:
        converted.append((x - origin_x, -(y - origin_y)))
    return converted


class TextureManager(object):
    """Registry of named textures plus a cache of per-entity texture sheets."""

    def __init__(self):
        self._textures = {}
        self.clear_texture_maps()

    def register_texture(self, name, filename):
        """Load *filename* and register the resulting texture under *name*."""
        self._textures[name] = Texture(data.load_image(filename))

    def flip_texture(self, name, new_name):
        """Register a horizontally mirrored copy of *name* as *new_name*."""
        source = self.get_texture(name)
        mirrored = pygame.transform.flip(source.image, True, False)
        self._textures[new_name] = Texture(mirrored)

    def get_texture(self, name):
        """Look up a previously registered texture by name."""
        return self._textures[name]

    def get_texture_map(self, entity, texture_name):
        """Return the (lazily built, cached) tiled sheet for *entity*.

        NOTE(review): a falsy *texture_name* makes texture_map_entity bail
        out without caching anything, so this would raise KeyError — confirm
        callers never pass an empty name here.
        """
        key = (entity, texture_name)
        if key not in self._texture_maps:
            self.texture_map_entity(entity, texture_name)
        return self._texture_maps[key]

    def texture_map_entity(self, entity, texture_name):
        """Build and cache a sheet tiling *texture_name* over *entity*'s bounds."""
        if not texture_name:
            return
        tile = self.get_texture(texture_name)
        min_v, max_v = entity.get_bounding_rect()
        sheet_width = max_v[0] - min_v[0]
        sheet_height = max_v[1] - min_v[1]
        sheet = pygame.Surface((int(sheet_width), int(sheet_height))).convert_alpha()
        sheet.fill((0, 0, 0, 0))
        # Enough copies in each direction to cover the bounding rect fully.
        copies_x = int(math.ceil(sheet_width / tile.width))
        copies_y = int(math.ceil(sheet_height / tile.height))
        for col in range(copies_x):
            for row in range(copies_y):
                sheet.blit(tile.image, (col * tile.width, row * tile.height))
        mapped = Texture(sheet)
        self._texture_maps[(entity, texture_name)] = mapped
        return mapped

    def clear_texture_maps(self):
        """Drop every cached per-entity texture sheet."""
        self._texture_maps = {}
{ "content_hash": "e54951010bfbf90c90b42a330c980df9", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 93, "avg_line_length": 32.95238095238095, "alnum_prop": 0.6078998073217726, "repo_name": "goosemo/monkey", "id": "1eb5d2ba2adcfcb0ef50085e61e1a99be18ef6c1", "size": "2076", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gamelib/texture.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "147931" } ], "symlink_target": "" }
# NOTE(review): these tests drive the JavaScript (Tern-based) completer
# through ycmd's HTTP API and assert on the JSON responses.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division

from future import standard_library
standard_library.install_aliases()
from builtins import *  # noqa

from hamcrest import assert_that, contains, contains_inanyorder, has_entries
from nose.tools import eq_
from pprint import pformat
import requests

from ycmd.tests.javascript import IsolatedYcmd, PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
                                    ChunkMatcher,
                                    ErrorMatcher,
                                    LocationMatcher,
                                    WaitUntilCompleterServerReady )
from ycmd.utils import ReadFile


@SharedYcmd
def Subcommands_DefinedSubcommands_test( app ):
  subcommands_data = BuildRequest( completer_target = 'javascript' )

  eq_( sorted( [ 'GoToDefinition',
                 'GoTo',
                 'GetDoc',
                 'GetType',
                 'GoToReferences',
                 'RefactorRename',
                 'RestartServer' ] ),
       app.post_json( '/defined_subcommands', subcommands_data ).json )


# Shared driver: parses the test file, runs the requested subcommand and
# checks the response code plus a hamcrest matcher over the JSON payload.
def RunTest( app, test ):
  contents = ReadFile( test[ 'request' ][ 'filepath' ] )

  # NOTE(review): `kw` aliases the caller's dict, so update() mutates
  # test[ 'request' ] in place and the second call below sees the keys added
  # by the first (e.g. 'event_name') — TODO confirm this is intentional.
  def CombineRequest( request, data ):
    kw = request
    request.update( data )
    return BuildRequest( **kw )

  # Because we aren't testing this command, we *always* ignore errors. This
  # is mainly because we (may) want to test scenarios where the completer
  # throws an exception and the easiest way to do that is to throw from
  # within the FlagsForFile function.
  app.post_json( '/event_notification',
                 CombineRequest( test[ 'request' ], {
                   'event_name': 'FileReadyToParse',
                   'contents': contents,
                   'filetype': 'javascript',
                 } ),
                 expect_errors = True )

  # We also ignore errors here, but then we check the response code
  # ourself. This is to allow testing of requests returning errors.
  response = app.post_json(
    '/run_completer_command',
    CombineRequest( test[ 'request' ], {
      'completer_target': 'filetype_default',
      'contents': contents,
      'filetype': 'javascript',
      'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
                             + test[ 'request' ].get( 'arguments', [] ) )
    } ),
    expect_errors = True )

  print( 'completer response: {0}'.format( pformat( response.json ) ) )

  eq_( response.status_code, test[ 'expect' ][ 'response' ] )

  assert_that( response.json, test[ 'expect' ][ 'data' ] )


@SharedYcmd
def Subcommands_GoToDefinition_test( app ):
  RunTest( app, {
    'description': 'GoToDefinition works within file',
    'request': {
      'command': 'GoToDefinition',
      'line_num': 13,
      'column_num': 25,
      'filepath': PathToTestFile( 'simple_test.js' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'filepath': PathToTestFile( 'simple_test.js' ),
        'line_num': 1,
        'column_num': 5,
      } )
    }
  } )


@SharedYcmd
def Subcommands_GoToDefinition_Unicode_test( app ):
  RunTest( app, {
    'description': 'GoToDefinition works within file with unicode',
    'request': {
      'command': 'GoToDefinition',
      'line_num': 11,
      'column_num': 12,
      'filepath': PathToTestFile( 'unicode.js' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'filepath': PathToTestFile( 'unicode.js' ),
        'line_num': 6,
        'column_num': 26,
      } )
    }
  } )


@SharedYcmd
def Subcommands_GoTo_test( app ):
  RunTest( app, {
    'description': 'GoTo works the same as GoToDefinition within file',
    'request': {
      'command': 'GoTo',
      'line_num': 13,
      'column_num': 25,
      'filepath': PathToTestFile( 'simple_test.js' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'filepath': PathToTestFile( 'simple_test.js' ),
        'line_num': 1,
        'column_num': 5,
      } )
    }
  } )


@SharedYcmd
def Subcommands_GetDoc_test( app ):
  RunTest( app, {
    'description': 'GetDoc works within file',
    'request': {
      'command': 'GetDoc',
      'line_num': 7,
      'column_num': 16,
      'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'detailed_info': (
          'Name: mine_bitcoin\n'
          'Type: fn(how_much: ?) -> number\n\n'
          'This function takes a number and invests it in bitcoin. It '
          'returns\nthe expected value (in notional currency) after 1 year.' )
      } )
    }
  } )


@SharedYcmd
def Subcommands_GetType_test( app ):
  RunTest( app, {
    'description': 'GetType works within file',
    'request': {
      'command': 'GetType',
      'line_num': 11,
      'column_num': 14,
      'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'message': 'number'
      } )
    }
  } )


@SharedYcmd
def Subcommands_GoToReferences_test( app ):
  RunTest( app, {
    'description': 'GoToReferences works within file',
    'request': {
      'command': 'GoToReferences',
      'line_num': 17,
      'column_num': 29,
      'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
    },
    'expect': {
      'response': requests.codes.ok,
      # Reference locations may come back in any order.
      'data': contains_inanyorder(
        has_entries( {
          'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
          'line_num': 17,
          'column_num': 29,
        } ),
        has_entries( {
          'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
          'line_num': 12,
          'column_num': 9,
        } ) )
    }
  } )


@SharedYcmd
def Subcommands_GoToReferences_Unicode_test( app ):
  RunTest( app, {
    'description': 'GoToReferences works within file with unicode chars',
    'request': {
      'command': 'GoToReferences',
      'line_num': 11,
      'column_num': 5,
      'filepath': PathToTestFile( 'unicode.js' ),
    },
    'expect': {
      'response': requests.codes.ok,
      'data': contains_inanyorder(
        has_entries( {
          'filepath': PathToTestFile( 'unicode.js' ),
          'line_num': 5,
          'column_num': 5,
        } ),
        has_entries( {
          'filepath': PathToTestFile( 'unicode.js' ),
          'line_num': 9,
          'column_num': 1,
        } ),
        has_entries( {
          'filepath': PathToTestFile( 'unicode.js' ),
          'line_num': 11,
          'column_num': 1,
        } ) )
    }
  } )


# NOTE(review): 'Itendifier' is a typo for 'Identifier'; left unchanged
# because the function name is how the test is discovered/reported.
@SharedYcmd
def Subcommands_GetDocWithNoItendifier_test( app ):
  RunTest( app, {
    'description': 'GetDoc works when no identifier',
    'request': {
      'command': 'GetDoc',
      'filepath': PathToTestFile( 'simple_test.js' ),
      'line_num': 12,
      'column_num': 1,
    },
    'expect': {
      'response': requests.codes.internal_server_error,
      'data': ErrorMatcher( RuntimeError, 'TernError: No type found '
                                          'at the given position.' ),
    }
  } )


@SharedYcmd
def Subcommands_RefactorRename_Simple_test( app ):
  filepath = PathToTestFile( 'simple_test.js' )
  RunTest( app, {
    'description': 'RefactorRename works within a single scope/file',
    'request': {
      'command': 'RefactorRename',
      'arguments': [ 'test' ],
      'filepath': filepath,
      'line_num': 15,
      'column_num': 32,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries ( {
        'fixits': contains( has_entries( {
          'chunks': contains(
            ChunkMatcher( 'test',
                          LocationMatcher( filepath, 1, 5 ),
                          LocationMatcher( filepath, 1, 22 ) ),
            ChunkMatcher( 'test',
                          LocationMatcher( filepath, 13, 25 ),
                          LocationMatcher( filepath, 13, 42 ) ),
            ChunkMatcher( 'test',
                          LocationMatcher( filepath, 14, 24 ),
                          LocationMatcher( filepath, 14, 41 ) ),
            ChunkMatcher( 'test',
                          LocationMatcher( filepath, 15, 24 ),
                          LocationMatcher( filepath, 15, 41 ) ),
            ChunkMatcher( 'test',
                          LocationMatcher( filepath, 21, 7 ),
                          LocationMatcher( filepath, 21, 24 ) ),
            # On the same line, ensuring offsets are as expected (as
            # unmodified source, similar to clang)
            ChunkMatcher( 'test',
                          LocationMatcher( filepath, 21, 28 ),
                          LocationMatcher( filepath, 21, 45 ) ),
          ),
          'location': LocationMatcher( filepath, 15, 32 )
        } ) )
      } )
    }
  } )


@SharedYcmd
def Subcommands_RefactorRename_MultipleFiles_test( app ):
  file1 = PathToTestFile( 'file1.js' )
  file2 = PathToTestFile( 'file2.js' )
  file3 = PathToTestFile( 'file3.js' )

  RunTest( app, {
    'description': 'RefactorRename works across files',
    'request': {
      'command': 'RefactorRename',
      'arguments': [ 'a-quite-long-string' ],
      'filepath': file1,
      'line_num': 3,
      'column_num': 14,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries ( {
        'fixits': contains( has_entries( {
          'chunks': contains(
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file1, 1, 5 ),
                          LocationMatcher( file1, 1, 11 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file1, 3, 14 ),
                          LocationMatcher( file1, 3, 20 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file2, 2, 14 ),
                          LocationMatcher( file2, 2, 20 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file3, 3, 12 ),
                          LocationMatcher( file3, 3, 18 ) )
          ),
          'location': LocationMatcher( file1, 3, 14 )
        } ) )
      } )
    }
  } )


# Needs to be isolated to prevent interfering with other tests (this test
# loads an extra file into tern's project memory)
@IsolatedYcmd
def Subcommands_RefactorRename_MultipleFiles_OnFileReadyToParse_test( app ):
  WaitUntilCompleterServerReady( app, 'javascript' )

  file1 = PathToTestFile( 'file1.js' )
  file2 = PathToTestFile( 'file2.js' )
  file3 = PathToTestFile( 'file3.js' )

  # This test is roughly the same as the previous one, except here file4.js is
  # pushed into the Tern engine via 'opening it in the editor' (i.e.
  # FileReadyToParse event). The first 3 are loaded into the tern server
  # because they are listed in the .tern-project file's loadEagerly option.
  file4 = PathToTestFile( 'file4.js' )

  app.post_json( '/event_notification',
                 BuildRequest( **{
                   'filetype': 'javascript',
                   'event_name': 'FileReadyToParse',
                   'contents': ReadFile( file4 ),
                   'filepath': file4,
                 } ),
                 expect_errors = False )

  RunTest( app, {
    'description': 'FileReadyToParse loads files into tern server',
    'request': {
      'command': 'RefactorRename',
      'arguments': [ 'a-quite-long-string' ],
      'filepath': file1,
      'line_num': 3,
      'column_num': 14,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains( has_entries( {
          'chunks': contains(
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file1, 1, 5 ),
                          LocationMatcher( file1, 1, 11 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file1, 3, 14 ),
                          LocationMatcher( file1, 3, 20 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file2, 2, 14 ),
                          LocationMatcher( file2, 2, 20 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file3, 3, 12 ),
                          LocationMatcher( file3, 3, 18 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file4, 4, 22 ),
                          LocationMatcher( file4, 4, 28 ) )
          ),
          'location': LocationMatcher( file1, 3, 14 )
        } ) )
      } )
    }
  } )


@SharedYcmd
def Subcommands_RefactorRename_Missing_New_Name_test( app ):
  RunTest( app, {
    'description': 'RefactorRename raises an error without new name',
    'request': {
      'command': 'RefactorRename',
      'line_num': 17,
      'column_num': 29,
      'filepath': PathToTestFile( 'coollib', 'cool_object.js' ),
    },
    'expect': {
      'response': requests.codes.internal_server_error,
      'data': ErrorMatcher( ValueError,
                            'Please specify a new name to rename it to.\n'
                            'Usage: RefactorRename <new name>' ),
    }
  } )


@SharedYcmd
def Subcommands_RefactorRename_Unicode_test( app ):
  filepath = PathToTestFile( 'unicode.js' )
  RunTest( app, {
    'description': 'RefactorRename works with unicode identifiers',
    'request': {
      'command': 'RefactorRename',
      'arguments': [ '†es†' ],
      'filepath': filepath,
      'line_num': 11,
      'column_num': 3,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries ( {
        'fixits': contains( has_entries( {
          'chunks': contains(
            ChunkMatcher( '†es†',
                          LocationMatcher( filepath, 5, 5 ),
                          LocationMatcher( filepath, 5, 13 ) ),
            ChunkMatcher( '†es†',
                          LocationMatcher( filepath, 9, 1 ),
                          LocationMatcher( filepath, 9, 9 ) ),
            ChunkMatcher( '†es†',
                          LocationMatcher( filepath, 11, 1 ),
                          LocationMatcher( filepath, 11, 9 ) )
          ),
          'location': LocationMatcher( filepath, 11, 3 )
        } ) )
      } )
    }
  } )
{ "content_hash": "347ca9c868c10955048e2cec9a20b113", "timestamp": "", "source": "github", "line_count": 470, "max_line_length": 79, "avg_line_length": 30.993617021276595, "alnum_prop": 0.5341525365552275, "repo_name": "rfguri/vimfiles", "id": "9712474d688abf6d9b9cb708fbcfdbe2557b9101", "size": "15284", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bundle/ycm/third_party/ycmd/ycmd/tests/javascript/subcommands_test.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
""" Short description. """ # Copyright © 2015 1&1 Group <git@1and1.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, unicode_literals, print_function import os import sys # from . import … __all__ = []
{ "content_hash": "8bd7686cdf25b7819b58e24bc2770d37", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 74, "avg_line_length": 31.166666666666668, "alnum_prop": 0.7366310160427807, "repo_name": "1and1/confluencer", "id": "d6093f3f963565b4a17baa2e8b312c15d007686f", "size": "810", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "project.d/skeleton_module.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "80139" } ], "symlink_target": "" }
"""Python wrappers for Datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import threading import warnings import numpy as np import six from tensorflow.python.compat import compat from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.util import nest from tensorflow.python.data.util import random_seed from tensorflow.python.data.util import sparse from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import smart_cond from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.ops import gen_io_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import string_ops from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export("data.Dataset") class Dataset(object): """Represents a potentially large set of elements. A `Dataset` can be used to represent an input pipeline as a collection of elements (nested structures of tensors) and a "logical plan" of transformations that act on those elements. """ __metaclass__ = abc.ABCMeta def __init__(self): pass def _as_serialized_graph(self): """Produces serialized graph representation of the dataset. Returns: A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a serialized graph. 
""" return gen_dataset_ops.dataset_to_graph(self._as_variant_tensor()) @abc.abstractmethod def _as_variant_tensor(self): """Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset. Returns: A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset. """ raise NotImplementedError("Dataset._as_variant_tensor") def make_initializable_iterator(self, shared_name=None): """Creates an `Iterator` for enumerating the elements of this dataset. Note: The returned iterator will be in an uninitialized state, and you must run the `iterator.initializer` operation before using it: ```python dataset = ... iterator = dataset.make_initializable_iterator() # ... sess.run(iterator.initializer) ``` Args: shared_name: (Optional.) If non-empty, the returned iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). Returns: An `Iterator` over the elements of this dataset. Raises: RuntimeError: If eager execution is enabled. """ if context.executing_eagerly(): raise RuntimeError( "dataset.make_initializable_iterator is not supported when eager " "execution is enabled.") if shared_name is None: shared_name = "" if compat.forward_compatible(2018, 8, 3): iterator_resource = gen_dataset_ops.iterator_v2( container="", shared_name=shared_name, **flat_structure(self)) else: iterator_resource = gen_dataset_ops.iterator( container="", shared_name=shared_name, **flat_structure(self)) with ops.colocate_with(iterator_resource): initializer = gen_dataset_ops.make_iterator(self._as_variant_tensor(), iterator_resource) return iterator_ops.Iterator(iterator_resource, initializer, self.output_types, self.output_shapes, self.output_classes) def __iter__(self): """Creates an `Iterator` for enumerating the elements of this dataset. The returned iterator implements the Python iterator protocol and therefore can only be used in eager mode. Returns: An `Iterator` over the elements of this dataset. 
Raises: RuntimeError: If eager execution is not enabled. """ if context.executing_eagerly(): return iterator_ops.EagerIterator(self) else: raise RuntimeError("dataset.__iter__() is only supported when eager " "execution is enabled.") def make_one_shot_iterator(self): """Creates an `Iterator` for enumerating the elements of this dataset. Note: The returned iterator will be initialized automatically. A "one-shot" iterator does not currently support re-initialization. Returns: An `Iterator` over the elements of this dataset. """ if context.executing_eagerly(): return iterator_ops.EagerIterator(self) # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is # a 0-argument function. @function.Defun(capture_by_value=True) def _make_dataset(): return self._as_variant_tensor() # pylint: disable=protected-access try: _make_dataset.add_to_graph(ops.get_default_graph()) except ValueError as err: if "Cannot capture a stateful node" in str(err): raise ValueError( "Failed to create a one-shot iterator for a dataset. " "`Dataset.make_one_shot_iterator()` does not support datasets that " "capture stateful objects, such as a `Variable` or `LookupTable`. " "In these cases, use `Dataset.make_initializable_iterator()`. " "(Original error: %s)" % err) else: six.reraise(ValueError, err) return iterator_ops.Iterator( gen_dataset_ops.one_shot_iterator( dataset_factory=_make_dataset, **flat_structure(self)), None, self.output_types, self.output_shapes, self.output_classes) @abc.abstractproperty def output_classes(self): """Returns the class of each component of an element of this dataset. The expected values are `tf.Tensor` and `tf.SparseTensor`. Returns: A nested structure of Python `type` objects corresponding to each component of an element of this dataset. """ raise NotImplementedError("Dataset.output_classes") @abc.abstractproperty def output_shapes(self): """Returns the shape of each component of an element of this dataset. 
Returns: A nested structure of `tf.TensorShape` objects corresponding to each component of an element of this dataset. """ raise NotImplementedError("Dataset.output_shapes") @abc.abstractproperty def output_types(self): """Returns the type of each component of an element of this dataset. Returns: A nested structure of `tf.DType` objects corresponding to each component of an element of this dataset. """ raise NotImplementedError("Dataset.output_types") def __repr__(self): output_shapes = nest.map_structure(str, self.output_shapes) output_shapes = str(output_shapes).replace("'", "") output_types = nest.map_structure(repr, self.output_types) output_types = str(output_types).replace("'", "") return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes, output_types)) @staticmethod def from_tensors(tensors): """Creates a `Dataset` with a single element, comprising the given tensors. Note that if `tensors` contains a NumPy array, and eager execution is not enabled, the values will be embedded in the graph as one or more `tf.constant` operations. For large datasets (> 1 GB), this can waste memory and run into byte limits of graph serialization. If tensors contains one or more large NumPy arrays, consider the alternative described in [this guide](https://tensorflow.org/guide/datasets#consuming_numpy_arrays). Args: tensors: A nested structure of tensors. Returns: Dataset: A `Dataset`. """ return TensorDataset(tensors) @staticmethod def from_tensor_slices(tensors): """Creates a `Dataset` whose elements are slices of the given tensors. Note that if `tensors` contains a NumPy array, and eager execution is not enabled, the values will be embedded in the graph as one or more `tf.constant` operations. For large datasets (> 1 GB), this can waste memory and run into byte limits of graph serialization. If tensors contains one or more large NumPy arrays, consider the alternative described in [this guide](https://tensorflow.org/guide/datasets#consuming_numpy_arrays). 
Args: tensors: A nested structure of tensors, each having the same size in the 0th dimension. Returns: Dataset: A `Dataset`. """ return TensorSliceDataset(tensors) @staticmethod @deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.") def from_sparse_tensor_slices(sparse_tensor): """Splits each rank-N `tf.SparseTensor` in this dataset row-wise. Args: sparse_tensor: A `tf.SparseTensor`. Returns: Dataset: A `Dataset` of rank-(N-1) sparse tensors. """ return SparseTensorSliceDataset(sparse_tensor) class _GeneratorState(object): """Stores outstanding iterators created from a Python generator. This class keeps track of potentially multiple iterators that may have been created from a generator, e.g. in the case that the dataset is repeated, or nested within a parallel computation. """ def __init__(self, generator): self._generator = generator self._lock = threading.Lock() self._next_id = 0 # GUARDED_BY(self._lock) self._args = {} self._iterators = {} def get_next_id(self, *args): with self._lock: ret = self._next_id self._next_id += 1 self._args[ret] = args # NOTE(mrry): Explicitly create an array of `np.int64` because implicit # casting in `py_func()` will create an array of `np.int32` on Windows, # leading to a runtime error. return np.array(ret, dtype=np.int64) def get_iterator(self, iterator_id): try: return self._iterators[iterator_id] except KeyError: iterator = iter(self._generator(*self._args.pop(iterator_id))) self._iterators[iterator_id] = iterator return iterator def iterator_completed(self, iterator_id): del self._iterators[iterator_id] @staticmethod def from_generator(generator, output_types, output_shapes=None, args=None): """Creates a `Dataset` whose elements are generated by `generator`. The `generator` argument must be a callable object that returns an object that support the `iter()` protocol (e.g. a generator function). 
The elements generated by `generator` must be compatible with the given `output_types` and (optional) `output_shapes` arguments. For example: ```python import itertools def gen(): for i in itertools.count(1): yield (i, [1] * i) ds = Dataset.from_generator( gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None]))) value = ds.make_one_shot_iterator().get_next() sess.run(value) # (1, array([1])) sess.run(value) # (2, array([1, 1])) ``` NOTE: The current implementation of `Dataset.from_generator()` uses `tf.py_func` and inherits the same constraints. In particular, it requires the `Dataset`- and `Iterator`-related operations to be placed on a device in the same process as the Python program that called `Dataset.from_generator()`. The body of `generator` will not be serialized in a `GraphDef`, and you should not use this method if you need to serialize your model and restore it in a different environment. NOTE: If `generator` depends on mutable global variables or other external state, be aware that the runtime may invoke `generator` multiple times (in order to support repeating the `Dataset`) and at any time between the call to `Dataset.from_generator()` and the production of the first element from the generator. Mutating global variables or external state can cause undefined behavior, and we recommend that you explicitly cache any external state in `generator` before calling `Dataset.from_generator()`. Args: generator: A callable object that returns an object that supports the `iter()` protocol. If `args` is not specified, `generator` must take no arguments; otherwise it must take as many arguments as there are values in `args`. output_types: A nested structure of `tf.DType` objects corresponding to each component of an element yielded by `generator`. output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects corresponding to each component of an element yielded by `generator`. args: (Optional.) 
A tuple of `tf.Tensor` objects that will be evaluated and passed to `generator` as NumPy-array arguments. Returns: Dataset: A `Dataset`. """ if not callable(generator): raise TypeError("`generator` must be callable.") if output_shapes is None: output_shapes = nest.map_structure( lambda _: tensor_shape.TensorShape(None), output_types) else: output_shapes = nest.map_structure_up_to( output_types, tensor_shape.as_shape, output_shapes) if args is None: args = () else: args = tuple(ops.convert_n_to_tensor(args, name="args")) flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)] flattened_shapes = nest.flatten(output_shapes) generator_state = Dataset._GeneratorState(generator) def get_iterator_id_fn(unused_dummy): """Creates a unique `iterator_id` for each pass over the dataset. The returned `iterator_id` disambiguates between multiple concurrently existing iterators. Args: unused_dummy: Ignored value. Returns: A `tf.int64` tensor whose value uniquely identifies an iterator in `generator_state`. """ return script_ops.py_func( generator_state.get_next_id, args, dtypes.int64, stateful=True) def generator_next_fn(iterator_id_t): """Generates the next element from iterator with ID `iterator_id_t`. We map this function across an infinite repetition of the `iterator_id_t`, and raise `StopIteration` to terminate the iteration. Args: iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the iterator in `generator_state` from which to generate an element. Returns: A nested structure of tensors representing an element from the iterator. """ def generator_py_func(iterator_id): """A `py_func` that will be called to invoke the iterator.""" # `next()` raises `StopIteration` when there are no more # elements remaining to be generated. values = next(generator_state.get_iterator(iterator_id)) # Use the same _convert function from the py_func() implementation to # convert the returned values to arrays early, so that we can inspect # their values. 
try: flattened_values = nest.flatten_up_to(output_types, values) except (TypeError, ValueError): raise TypeError( "`generator` yielded an element that did not match the expected " "structure. The expected structure was %s, but the yielded " "element was %s." % (output_types, values)) ret_arrays = [] for ret, dtype in zip(flattened_values, flattened_types): try: ret_arrays.append(script_ops.FuncRegistry._convert( # pylint: disable=protected-access ret, dtype=dtype.as_numpy_dtype)) except (TypeError, ValueError): raise TypeError( "`generator` yielded an element that could not be converted to " "the expected type. The expected type was %s, but the yielded " "element was %s." % (dtype.name, ret)) # Additional type and shape checking to ensure that the components # of the generated element match the `output_types` and `output_shapes` # arguments. for (ret_array, expected_dtype, expected_shape) in zip( ret_arrays, flattened_types, flattened_shapes): if ret_array.dtype != expected_dtype.as_numpy_dtype: raise TypeError( "`generator` yielded an element of type %s where an element " "of type %s was expected." % (ret_array.dtype, expected_dtype.as_numpy_dtype)) if not expected_shape.is_compatible_with(ret_array.shape): raise ValueError( "`generator` yielded an element of shape %s where an element " "of shape %s was expected." % (ret_array.shape, expected_shape)) return ret_arrays flat_values = script_ops.py_func( generator_py_func, [iterator_id_t], flattened_types, stateful=True) # The `py_func()` op drops the inferred shapes, so we add them back in # here. 
if output_shapes is not None: for ret_t, shape in zip(flat_values, flattened_shapes): ret_t.set_shape(shape) return nest.pack_sequence_as(output_types, flat_values) def finalize_fn(iterator_id_t): """Releases host-side state for the iterator with ID `iterator_id_t`.""" def finalize_py_func(iterator_id): generator_state.iterator_completed(iterator_id) # We return a dummy value so that the `finalize_fn` has a valid # signature. # NOTE(mrry): Explicitly create an array of `np.int64` because implicit # casting in `py_func()` will create an array of `np.int32` on Windows, # leading to a runtime error. return np.array(0, dtype=np.int64) return script_ops.py_func( finalize_py_func, [iterator_id_t], dtypes.int64, stateful=True) # This function associates each traversal of `generator` with a unique # iterator ID. def flat_map_fn(dummy_arg): # The `get_iterator_id_fn` gets a unique ID for the current instance of # of the generator. # The `generator_next_fn` gets the next element from the iterator with the # given ID, and raises StopIteration when that iterator contains no # more elements. return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn, finalize_fn) # A single-element dataset that, each time it is evaluated, contains a # freshly-generated and unique (for the returned dataset) int64 # ID that will be used to identify the appropriate Python state, which # is encapsulated in `generator_state`, and captured in # `get_iterator_id_map_fn`. dummy = 0 id_dataset = Dataset.from_tensors(dummy) # A dataset that contains all of the elements generated by a # single iterator created from `generator`, identified by the # iterator ID contained in `id_dataset`. Lifting the iteration # into a flat_map here enables multiple repetitions and/or nested # versions of the returned dataset to be created, because it forces # the generation of a new ID for each version. 
return id_dataset.flat_map(flat_map_fn) @staticmethod def range(*args): """Creates a `Dataset` of a step-separated range of values. For example: ```python Dataset.range(5) == [0, 1, 2, 3, 4] Dataset.range(2, 5) == [2, 3, 4] Dataset.range(1, 5, 2) == [1, 3] Dataset.range(1, 5, -2) == [] Dataset.range(5, 1) == [] Dataset.range(5, 1, -2) == [5, 3] ``` Args: *args: follow same semantics as python's xrange. len(args) == 1 -> start = 0, stop = args[0], step = 1 len(args) == 2 -> start = args[0], stop = args[1], step = 1 len(args) == 3 -> start = args[0], stop = args[1, stop = args[2] Returns: Dataset: A `RangeDataset`. Raises: ValueError: if len(args) == 0. """ return RangeDataset(*args) @staticmethod def zip(datasets): """Creates a `Dataset` by zipping together the given datasets. This method has similar semantics to the built-in `zip()` function in Python, with the main difference being that the `datasets` argument can be an arbitrary nested structure of `Dataset` objects. For example: ```python # NOTE: The following examples use `{ ... }` to represent the # contents of a dataset. a = { 1, 2, 3 } b = { 4, 5, 6 } c = { (7, 8), (9, 10), (11, 12) } d = { 13, 14 } # The nested structure of the `datasets` argument determines the # structure of elements in the resulting dataset. Dataset.zip((a, b)) == { (1, 4), (2, 5), (3, 6) } Dataset.zip((b, a)) == { (4, 1), (5, 2), (6, 3) } # The `datasets` argument may contain an arbitrary number of # datasets. Dataset.zip((a, b, c)) == { (1, 4, (7, 8)), (2, 5, (9, 10)), (3, 6, (11, 12)) } # The number of elements in the resulting dataset is the same as # the size of the smallest dataset in `datasets`. Dataset.zip((a, d)) == { (1, 13), (2, 14) } ``` Args: datasets: A nested structure of datasets. Returns: Dataset: A `Dataset`. """ return ZipDataset(datasets) def concatenate(self, dataset): """Creates a `Dataset` by concatenating given dataset with this dataset. ```python # NOTE: The following examples use `{ ... 
}` to represent the
    # contents of a dataset.
    a = { 1, 2, 3 }
    b = { 4, 5, 6, 7 }

    # Input dataset and dataset to be concatenated should have same
    # nested structures and output types.
    # c = { (8, 9), (10, 11), (12, 13) }
    # d = { 14.0, 15.0, 16.0 }
    # a.concatenate(c) and a.concatenate(d) would result in error.

    a.concatenate(b) == { 1, 2, 3, 4, 5, 6, 7 }
    ```

    Args:
      dataset: `Dataset` to be concatenated.

    Returns:
      Dataset: A `Dataset`.
    """
    # Delegates to ConcatenateDataset, which wraps this dataset and `dataset`.
    return ConcatenateDataset(self, dataset)

  def prefetch(self, buffer_size):
    """Creates a `Dataset` that prefetches elements from this dataset.

    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
        number of elements that will be buffered when prefetching.

    Returns:
      Dataset: A `Dataset`.
    """
    return PrefetchDataset(self, buffer_size)

  @staticmethod
  def list_files(file_pattern, shuffle=None, seed=None):
    """A dataset of all files matching a pattern.

    NOTE: The default behavior of this method is to return filenames in
    a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
    to get results in a deterministic order.

    Example:
      If we had the following files on our filesystem:
        - /path/to/dir/a.txt
        - /path/to/dir/b.py
        - /path/to/dir/c.py
      If we pass "/path/to/dir/*.py" as the file pattern, the dataset would
      produce:
        - /path/to/dir/b.py
        - /path/to/dir/c.py

    Args:
      file_pattern: A string or scalar string `tf.Tensor`, representing
        the filename pattern that will be matched.
      shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
        Defaults to `True`.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        random seed that will be used to create the distribution. See
        `tf.set_random_seed` for behavior.

    Returns:
      Dataset: A `Dataset` of strings corresponding to file names.
""" with ops.name_scope("list_files"): if shuffle is None: shuffle = True file_pattern = ops.convert_to_tensor( file_pattern, dtype=dtypes.string, name="file_pattern") matching_files = gen_io_ops.matching_files(file_pattern) # Raise an exception if `file_pattern` does not match any files. condition = math_ops.greater(array_ops.shape(matching_files)[0], 0, name="match_not_empty") message = math_ops.add( "No files matched pattern: ", string_ops.reduce_join(file_pattern, separator=", "), name="message") assert_not_empty = control_flow_ops.Assert( condition, [message], summarize=1, name="assert_not_empty") with ops.control_dependencies([assert_not_empty]): matching_files = array_ops.identity(matching_files) dataset = Dataset.from_tensor_slices(matching_files) if shuffle: # NOTE(mrry): The shuffle buffer size must be greater than zero, but the # list of files might be empty. buffer_size = math_ops.maximum( array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1) dataset = dataset.shuffle(buffer_size, seed=seed) return dataset def repeat(self, count=None): """Repeats this dataset `count` times. NOTE: If this dataset is a function of global state (e.g. a random number generator), then different repetitions may produce different elements. Args: count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the number of times the dataset should be repeated. The default behavior (if `count` is `None` or `-1`) is for the dataset be repeated indefinitely. Returns: Dataset: A `Dataset`. """ return RepeatDataset(self, count) def _enumerate(self, start=0): max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max return Dataset.zip((Dataset.range(start, max_value), self)) def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None): """Randomly shuffles the elements of this dataset. Args: buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements from this dataset from which the new dataset will sample. seed: (Optional.) 
        A `tf.int64` scalar `tf.Tensor`, representing the random
        seed that will be used to create the distribution. See
        `tf.set_random_seed` for behavior.
      reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
        that the dataset should be pseudorandomly reshuffled each time it is
        iterated over. (Defaults to `True`.)

    Returns:
      Dataset: A `Dataset`.
    """
    # Seed handling and per-iteration reshuffling are implemented inside
    # ShuffleDataset.
    return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)

  def cache(self, filename=""):
    """Caches the elements in this dataset.

    Args:
      filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
        directory on the filesystem to use for caching tensors in this Dataset.
        If a filename is not provided, the dataset will be cached in memory.

    Returns:
      Dataset: A `Dataset`.
    """
    # The empty-string default selects the in-memory cache inside CacheDataset.
    return CacheDataset(self, filename)

  def take(self, count):
    """Creates a `Dataset` with at most `count` elements from this dataset.

    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements of this dataset that should be taken to form the new dataset.
        If `count` is -1, or if `count` is greater than the size of this
        dataset, the new dataset will contain all elements of this dataset.

    Returns:
      Dataset: A `Dataset`.
    """
    return TakeDataset(self, count)

  def skip(self, count):
    """Creates a `Dataset` that skips `count` elements from this dataset.

    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number
        of elements of this dataset that should be skipped to form the
        new dataset.  If `count` is greater than the size of this
        dataset, the new dataset will contain no elements.  If `count`
        is -1, skips the entire dataset.

    Returns:
      Dataset: A `Dataset`.
    """
    return SkipDataset(self, count)

  def shard(self, num_shards, index):
    """Creates a `Dataset` that includes only 1/`num_shards` of this dataset.

    This dataset operator is very useful when running distributed training, as
    it allows each worker to read a unique subset.
    When reading a single input file, you can skip elements as follows:

    ```python
    d = tf.data.TFRecordDataset(FLAGS.input_file)
    d = d.shard(FLAGS.num_workers, FLAGS.worker_index)
    d = d.repeat(FLAGS.num_epochs)
    d = d.shuffle(FLAGS.shuffle_buffer_size)
    d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)
    ```

    Important caveats:

    - Be sure to shard before you use any randomizing operator (such as
      shuffle).
    - Generally it is best if the shard operator is used early in the dataset
      pipeline. For example, when reading from a set of TFRecord files, shard
      before converting the dataset to input samples. This avoids reading every
      file on every worker. The following is an example of an efficient
      sharding strategy within a complete pipeline:

    ```python
    d = Dataset.list_files(FLAGS.pattern)
    d = d.shard(FLAGS.num_workers, FLAGS.worker_index)
    d = d.repeat(FLAGS.num_epochs)
    d = d.shuffle(FLAGS.shuffle_buffer_size)
    d = d.interleave(tf.data.TFRecordDataset,
                     cycle_length=FLAGS.num_readers, block_length=1)
    d = d.map(parser_fn, num_parallel_calls=FLAGS.num_map_threads)
    ```

    Args:
      num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
        shards operating in parallel.
      index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.

    Returns:
      Dataset: A `Dataset`.

    Raises:
      ValueError: if `num_shards` or `index` are illegal values. Note: error
        checking is done on a best-effort basis, and errors aren't guaranteed
        to be caught upon dataset creation. (e.g. passing a placeholder tensor
        bypasses the early checking, and will instead result in an error during
        a session.run call.)
""" num_shards = ops.convert_to_tensor( num_shards, name="num_shards", dtype=dtypes.int64) num_shards_static = tensor_util.constant_value(num_shards) index = ops.convert_to_tensor(index, name="index", dtype=dtypes.int64) index_static = tensor_util.constant_value(index) if num_shards_static is not None and num_shards_static < 1: raise ValueError("num_shards must be >= 1; got: %s" % num_shards_static) if index_static is not None and index_static < 0: raise ValueError("index must be >= 0; got: %s" % index_static) if (index_static is not None and num_shards_static is not None and index_static >= num_shards_static): raise ValueError("index must be <= num_shards; %s is not < %s" % (index_static, num_shards_static)) def filter_fn(elem_index, _): mod_result = math_ops.mod(elem_index, num_shards) return math_ops.equal(mod_result, index) return self._enumerate().filter(filter_fn).map(lambda _, elem: elem) def batch(self, batch_size, drop_remainder=False): """Combines consecutive elements of this dataset into batches. The tensors in the resulting element will have an additional outer dimension, which will be `batch_size` (or `N % batch_size` for the last element if `batch_size` does not divide the number of input elements `N` evenly and `drop_remainder` is `False`). If your program depends on the batches having the same outer dimension, you should set the `drop_remainder` argument to `True` to prevent the smaller batch from being produced. Args: batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements of this dataset to combine in a single batch. drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing whether the last batch should be dropped in the case its has fewer than `batch_size` elements; the default behavior is not to drop the smaller batch. Returns: Dataset: A `Dataset`. 
""" return BatchDataset(self, batch_size, drop_remainder) def padded_batch(self, batch_size, padded_shapes, padding_values=None, drop_remainder=False): """Combines consecutive elements of this dataset into padded batches. This transformation combines multiple consecutive elements of the input dataset into a single element. Like `tf.data.Dataset.batch`, the tensors in the resulting element will have an additional outer dimension, which will be `batch_size` (or `N % batch_size` for the last element if `batch_size` does not divide the number of input elements `N` evenly and `drop_remainder` is `False`). If your program depends on the batches having the same outer dimension, you should set the `drop_remainder` argument to `True` to prevent the smaller batch from being produced. Unlike `tf.data.Dataset.batch`, the input elements to be batched may have different shapes, and this transformation will pad each component to the respective shape in `padding_shapes`. The `padding_shapes` argument determines the resulting shape for each dimension of each component in an output element: * If the dimension is a constant (e.g. `tf.Dimension(37)`), the component will be padded out to that length in that dimension. * If the dimension is unknown (e.g. `tf.Dimension(None)`), the component will be padded out to the maximum length of all elements in that dimension. See also `tf.contrib.data.dense_to_sparse_batch`, which combines elements that may have different shapes into a `tf.SparseTensor`. Args: batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements of this dataset to combine in a single batch. padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64` vector tensor-like objects representing the shape to which the respective component of each input element should be padded prior to batching. Any unknown dimensions (e.g. 
`tf.Dimension(None)` in a `tf.TensorShape` or `-1` in a tensor-like object) will be padded to the maximum size of that dimension in each batch. padding_values: (Optional.) A nested structure of scalar-shaped `tf.Tensor`, representing the padding values to use for the respective components. Defaults are `0` for numeric types and the empty string for string types. drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing whether the last batch should be dropped in the case its has fewer than `batch_size` elements; the default behavior is not to drop the smaller batch. Returns: Dataset: A `Dataset`. """ return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values, drop_remainder) def map(self, map_func, num_parallel_calls=None): """Maps `map_func` across the elements of this dataset. This transformation applies `map_func` to each element of this dataset, and returns a new dataset containing the transformed elements, in the same order as they appeared in the input. For example: ```python # NOTE: The following examples use `{ ... }` to represent the # contents of a dataset. a = { 1, 2, 3, 4, 5 } a.map(lambda x: x + 1) = { 2, 3, 4, 5, 6 } ``` The input signature of `map_func` is determined by the structure of each element in this dataset. For example: ```python # Each element is a `tf.Tensor` object. a = { 1, 2, 3, 4, 5 } # `map_func` takes a single argument of type `tf.Tensor` with the same # shape and dtype. result = a.map(lambda x: ...) # Each element is a tuple containing two `tf.Tensor` objects. b = { (1, "foo"), (2, "bar"), (3, "baz") } # `map_func` takes two arguments of type `tf.Tensor`. result = b.map(lambda x_int, y_str: ...) # Each element is a dictionary mapping strings to `tf.Tensor` objects. c = { {"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}, {"a": 3, "b": "baz"} } # `map_func` takes a single argument of type `dict` with the same keys as # the elements. result = c.map(lambda d: ...) 
    ```

    The value or values returned by `map_func` determine the structure of each
    element in the returned dataset.

    ```python
    # `map_func` returns a scalar `tf.Tensor` of type `tf.float32`.
    def f(...):
      return tf.constant(37.0)
    result = dataset.map(f)
    result.output_classes == tf.Tensor
    result.output_types == tf.float32
    result.output_shapes == []  # scalar

    # `map_func` returns two `tf.Tensor` objects.
    def g(...):
      return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
    result = dataset.map(g)
    result.output_classes == (tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string)
    result.output_shapes == ([], [3])

    # Python primitives, lists, and NumPy arrays are implicitly converted to
    # `tf.Tensor`.
    def h(...):
      return 37.0, ["Foo", "Bar", "Baz"], np.array([1.0, 2.0], dtype=np.float64)
    result = dataset.map(h)
    result.output_classes == (tf.Tensor, tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string, tf.float64)
    result.output_shapes == ([], [3], [2])

    # `map_func` can return nested structures.
    def i(...):
      return {"a": 37.0, "b": [42, 16]}, "foo"
    result = dataset.map(i)
    result.output_classes == ({"a": tf.Tensor, "b": tf.Tensor}, tf.Tensor)
    result.output_types == ({"a": tf.float32, "b": tf.int32}, tf.string)
    result.output_shapes == ({"a": [], "b": [2]}, [])
    ```

    In addition to `tf.Tensor` objects, `map_func` can accept as arguments and
    return `tf.SparseTensor` objects.

    Args:
      map_func: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
       `self.output_types`) to another nested structure of tensors.
      num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
        representing the number of elements to process in parallel. If not
        specified, elements will be processed sequentially.

    Returns:
      Dataset: A `Dataset`.
""" if num_parallel_calls is None: return MapDataset(self, map_func) else: return ParallelMapDataset(self, map_func, num_parallel_calls) def flat_map(self, map_func): """Maps `map_func` across this dataset and flattens the result. Args: map_func: A function mapping a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a `Dataset`. Returns: Dataset: A `Dataset`. """ return FlatMapDataset(self, map_func) def interleave(self, map_func, cycle_length, block_length=1, num_parallel_calls=None): """Maps `map_func` across this dataset, and interleaves the results. For example, you can use `Dataset.interleave()` to process many input files concurrently: ```python # Preprocess 4 files concurrently, and interleave blocks of 16 records from # each file. filenames = ["/var/data/file1.txt", "/var/data/file2.txt", ...] dataset = (Dataset.from_tensor_slices(filenames) .interleave(lambda x: TextLineDataset(x).map(parse_fn, num_parallel_calls=1), cycle_length=4, block_length=16)) ``` The `cycle_length` and `block_length` arguments control the order in which elements are produced. `cycle_length` controls the number of input elements that are processed concurrently. If you set `cycle_length` to 1, this transformation will handle one input element at a time, and will produce identical results = to `tf.data.Dataset.flat_map`. In general, this transformation will apply `map_func` to `cycle_length` input elements, open iterators on the returned `Dataset` objects, and cycle through them producing `block_length` consecutive elements from each iterator, and consuming the next input element each time it reaches the end of an iterator. For example: ```python # NOTE: The following examples use `{ ... }` to represent the # contents of a dataset. a = { 1, 2, 3, 4, 5 } # NOTE: New lines indicate "block" boundaries. 
    a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),
                 cycle_length=2, block_length=4) == {
        1, 1, 1, 1,
        2, 2, 2, 2,
        1, 1,
        2, 2,
        3, 3, 3, 3,
        4, 4, 4, 4,
        3, 3,
        4, 4,
        5, 5, 5, 5,
        5, 5,
    }
    ```

    NOTE: The order of elements yielded by this transformation is
    deterministic, as long as `map_func` is a pure function. If
    `map_func` contains any stateful operations, the order in which
    that state is accessed is undefined.

    Args:
      map_func: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        `Dataset`.
      cycle_length: The number of elements from this dataset that will be
        processed concurrently.
      block_length: The number of consecutive elements to produce from each
        input element before cycling to another input element.
      num_parallel_calls: (Optional.) If specified, the implementation creates
        a threadpool, which is used to fetch inputs from cycle elements
        asynchronously and in parallel. The default behavior is to fetch inputs
        from cycle elements synchronously with no parallelism.

    Returns:
      Dataset: A `Dataset`.
    """
    # The serial and parallel cases are implemented by distinct dataset ops;
    # `num_parallel_calls=None` selects the fully synchronous variant.
    if num_parallel_calls is None:
      return InterleaveDataset(self, map_func, cycle_length, block_length)
    else:
      return ParallelInterleaveDataset(self, map_func, cycle_length,
                                       block_length, num_parallel_calls)

  def filter(self, predicate):
    """Filters this dataset according to `predicate`.

    Args:
      predicate: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to a scalar `tf.bool` tensor.

    Returns:
      Dataset: The `Dataset` containing the elements of this dataset for which
          `predicate` is `True`.
    """
    return FilterDataset(self, predicate)

  def apply(self, transformation_func):
    """Applies a transformation function to this dataset.

    `apply` enables chaining of custom `Dataset` transformations, which are
    represented as functions that take one `Dataset` argument and return a
    transformed `Dataset`.
For example: ``` dataset = (dataset.map(lambda x: x ** 2) .apply(group_by_window(key_func, reduce_func, window_size)) .map(lambda x: x ** 3)) ``` Args: transformation_func: A function that takes one `Dataset` argument and returns a `Dataset`. Returns: Dataset: The `Dataset` returned by applying `transformation_func` to this dataset. """ dataset = transformation_func(self) if not isinstance(dataset, Dataset): raise TypeError("`transformation_func` must return a Dataset.") return dataset def window(self, size, shift=None, stride=1, drop_remainder=False): """Combines input elements into a dataset of windows. Each window is a dataset itself and contains `size` elements (or possibly fewer if there are not enough input elements to fill the window and `drop_remainder` evaluates to false). The `stride` argument determines the stride of the input elements, and the `shift` argument determines the shift of the window. For example: - `tf.data.Dataset.range(7).window(2)` produces `{{0, 1}, {2, 3}, {4, 5}, {6}}` - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}` - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}` Args: size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements of the input dataset to combine into a window. shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the forward shift of the sliding window in each iteration. Defaults to `size`. stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the stride of the input elements in the sliding window. drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing whether a window should be dropped in case its size is smaller than `window_size`. Returns: Dataset: A `Dataset` of windows, each of which is a nested `Dataset` with the same structure as this dataset, but a finite subsequence of its elements. 
""" if shift is None: shift = size return WindowDataset(self, size, shift, stride, drop_remainder) class TensorDataset(Dataset): """A `Dataset` with a single element, viz. a nested structure of tensors.""" def __init__(self, tensors): """See `Dataset.from_tensors()` for details.""" super(TensorDataset, self).__init__() with ops.name_scope("tensors"): tensors = nest.pack_sequence_as(tensors, [ sparse_tensor_lib.SparseTensor.from_value(t) if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor( t, name="component_%d" % i) for i, t in enumerate(nest.flatten(tensors)) ]) self._tensors = sparse.serialize_sparse_tensors(tensors) self._output_classes = sparse.get_classes(tensors) self._output_shapes = nest.pack_sequence_as( tensors, [t.get_shape() for t in nest.flatten(tensors)]) self._output_types = nest.pack_sequence_as( tensors, [t.dtype for t in nest.flatten(tensors)]) def _as_variant_tensor(self): return gen_dataset_ops.tensor_dataset( nest.flatten(self._tensors), output_shapes=nest.flatten( sparse.as_dense_shapes(self.output_shapes, self.output_classes))) @property def output_classes(self): return self._output_classes @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types class TensorSliceDataset(Dataset): """A `Dataset` of slices from a nested structure of tensors.""" def __init__(self, tensors): """See `Dataset.from_tensor_slices()` for details.""" super(TensorSliceDataset, self).__init__() with ops.name_scope("tensors"): tensors = nest.pack_sequence_as(tensors, [ sparse_tensor_lib.SparseTensor.from_value(t) if sparse_tensor_lib.is_sparse(t) else ops.convert_to_tensor( t, name="component_%d" % i) for i, t in enumerate(nest.flatten(tensors)) ]) flat_tensors = nest.flatten(tensors) batch_dim = flat_tensors[0].get_shape()[0] for t in flat_tensors[1:]: batch_dim.assert_is_compatible_with(t.get_shape()[0]) self._tensors = sparse.serialize_many_sparse_tensors(tensors) self._output_classes = 
sparse.get_classes(tensors) self._output_shapes = nest.pack_sequence_as( tensors, [t.get_shape()[1:] for t in nest.flatten(tensors)]) self._output_types = nest.pack_sequence_as( tensors, [t.dtype for t in nest.flatten(tensors)]) def _as_variant_tensor(self): return gen_dataset_ops.tensor_slice_dataset( nest.flatten(self._tensors), output_shapes=nest.flatten( sparse.as_dense_shapes(self.output_shapes, self.output_classes))) @property def output_classes(self): return self._output_classes @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types class SparseTensorSliceDataset(Dataset): """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows.""" def __init__(self, sparse_tensor): """See `Dataset.from_sparse_tensor_slices()` for details.""" super(SparseTensorSliceDataset, self).__init__() if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor): raise TypeError("`sparse_tensor` must be a `tf.SparseTensor` object.") self._sparse_tensor = sparse_tensor def _as_variant_tensor(self): return gen_dataset_ops.sparse_tensor_slice_dataset( self._sparse_tensor.indices, self._sparse_tensor.values, self._sparse_tensor.dense_shape) @property def output_classes(self): return (ops.Tensor, ops.Tensor, ops.Tensor) @property def output_shapes(self): indices_shape = self._sparse_tensor.indices.get_shape() shape_shape = self._sparse_tensor.dense_shape.get_shape() rank = (indices_shape[1] - 1).merge_with(shape_shape[0] - 1) num_values = tensor_shape.Dimension(None) return (tensor_shape.TensorShape([num_values, rank]), tensor_shape.TensorShape([num_values]), tensor_shape.TensorShape([rank])) @property def output_types(self): return (dtypes.int64, self._sparse_tensor.dtype, dtypes.int64) class _NestedDatasetComponent(object): """The structure of a `Dataset` nested in a component of another `Dataset`. 
A `StructuredFunctionWrapper` around a function that returns a `Dataset` as one of its components will have a `NestedDatasetComponent` in the corresponding position in the `output_classes`, `output_shapes`, and `output_types` properties. NOTE(mrry): This class is not currently exposed via the public API. Support for nested datasets can be enabled on a function-by-function basis by setting `experimental_nested_dataset_support=True` in the `StructuredFunctionWrapper` initializer. TODO(b/110122868): Add this class, or something equivalent, to the public API. We are considering revising the public API for accessing Dataset structure (`output_classes` etc.) based on experience with nested datasets and other custom component types. """ def __init__(self, dataset=None, output_shapes=None, output_types=None, output_classes=None): if dataset is None: if (output_classes is None or output_shapes is None or output_types is None): raise ValueError( "Either `dataset`, or all of `output_classes`, " "`output_shapes`, and `output_types` must be specified.") self._output_classes = output_classes self._output_shapes = output_shapes self._output_types = output_types else: if not (output_classes is None and output_shapes is None and output_types is None): raise ValueError( "Either `dataset`, or all of `output_classes`, " "`output_shapes`, and `output_types` must be specified.") self._output_classes = dataset.output_classes self._output_shapes = dataset.output_shapes self._output_types = dataset.output_types @property def output_classes(self): return self._output_classes @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types class _VariantDataset(Dataset): """A Dataset wrapper around a `tf.variant`-typed function argument.""" def __init__(self, dataset_variant, structure): super(_VariantDataset, self).__init__() self._dataset_variant = dataset_variant self._structure = structure def _as_variant_tensor(self): return 
self._dataset_variant @property def output_classes(self): return self._structure.output_classes @property def output_shapes(self): return self._structure.output_shapes @property def output_types(self): return self._structure.output_types class StructuredFunctionWrapper(object): """A wrapper for `Defun` that supports structured arguments and return values. """ def __init__(self, func, transformation_name, dataset=None, input_classes=None, input_shapes=None, input_types=None, add_to_graph=True, experimental_nested_dataset_support=False): """Creates a new `StructuredFunctionWrapper` for the given function. Args: func: A function from a nested structure to another nested structure. transformation_name: Human-readable name of the transformation in which this function is being instantiated, for error messages. dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this dataset will be assumed as the structure for `func` arguments; otherwise `input_classes`, `input_shapes`, and `input_types` must be defined. input_classes: (Optional.) A nested structure of `type`. If given, this argument defines the Python types for `func` arguments. input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If given, this argument defines the shapes and structure for `func` arguments. input_types: (Optional.) A nested structure of `tf.DType`. If given, this argument defines the element types and structure for `func` arguments. add_to_graph: (Optional.) If `True`, the function will be added to the default graph. experimental_nested_dataset_support: (Optional.) If `True`, the function will support `tf.data.Dataset` objects as arguments and return values. Raises: ValueError: If an invalid combination of `dataset`, `input_classes`, `input_shapes`, and `input_types` is passed. 
""" if dataset is None: if input_classes is None or input_shapes is None or input_types is None: raise ValueError("Either `dataset`, or all of `input_classes`, " "`input_shapes`, and `input_types` must be specified.") self._input_shapes = input_shapes self._input_types = input_types self._input_classes = input_classes else: if not (input_classes is None and input_shapes is None and input_types is None): raise ValueError("Either `dataset`, or all of `input_classes`, " "`input_shapes`, and `input_types` must be specified.") self._input_shapes = dataset.output_shapes self._input_types = dataset.output_types self._input_classes = dataset.output_classes self._transformation_name = transformation_name # TODO(b/110122868): Enable this support for all `tf.data` functions. self._nested_dataset_support = experimental_nested_dataset_support @function.Defun(*self._defun_args()) def tf_data_structured_function_wrapper(*args): """Wrapper for passing nested structures to and from tf.data functions.""" flat_args = [] for arg, arg_class, arg_shape, arg_type in zip( args, nest.flatten(self._input_classes), nest.flatten(self._input_shapes), nest.flatten(self._input_types)): # TODO(b/110122868): Add a registration mechanism for new component # types. 
if arg_class is sparse_tensor_lib.SparseTensor: arg = sparse.deserialize_sparse_tensors( arg, arg_type, arg_shape, arg_class) arg.indices.set_shape([None, arg_shape.ndims]) arg.dense_shape.set_shape([arg_shape.ndims]) elif isinstance(arg_class, _NestedDatasetComponent): assert self._nested_dataset_support arg = _VariantDataset(arg, arg_class) else: arg.set_shape(arg_shape) flat_args.append(arg) nested_args = nest.pack_sequence_as(self._input_classes, flat_args) if not _should_unpack_args(nested_args): nested_args = (nested_args,) ret = func(*nested_args) # If `func` returns a list of tensors, `nest.flatten()` and # `ops.convert_to_tensor()` would conspire to attempt to stack # those tensors into a single tensor, because the customized # version of `nest.flatten()` does not recurse into lists. Since # it is more likely that the list arose from returning the # result of an operation (such as `tf.py_func()`) that returns a # list of not-necessarily-stackable tensors, we treat the # returned value is a `tuple` instead. A user wishing to pack # the return value into a single tensor can use an explicit # `tf.stack()` before returning. if isinstance(ret, list): ret = tuple(ret) # Convert any `SparseTensorValue`s to `SparseTensor`s and all other # values to tensors. flat_ret = [] flat_classes = [] flat_shapes = [] flat_types = [] for t in nest.flatten(ret): # TODO(b/110122868): Add a registration mechanism for new component # types. if sparse_tensor_lib.is_sparse(t): t = sparse_tensor_lib.SparseTensor.from_value(t) flat_ret.append(sparse.serialize_sparse_tensors(t)) flat_classes.append(sparse_tensor_lib.SparseTensor) flat_shapes.append(t.get_shape()) flat_types.append(t.dtype) elif isinstance(t, Dataset): if not self._nested_dataset_support: raise NotImplementedError( "The %s transformation does not currently support nested " "datasets as outputs." 
% self._transformation_name) flat_ret.append(t._as_variant_tensor()) # pylint: disable=protected-access component = _NestedDatasetComponent(t) flat_classes.append(component) flat_shapes.append(component) flat_types.append(component) else: try: t = ops.convert_to_tensor(t) except (ValueError, TypeError): raise TypeError("Unsupported return value from function passed to " "%s: %s." % (transformation_name, t)) flat_ret.append(t) flat_classes.append(ops.Tensor) flat_shapes.append(t.get_shape()) flat_types.append(t.dtype) ret = nest.pack_sequence_as(ret, flat_ret) self._output_classes = nest.pack_sequence_as(ret, flat_classes) self._output_shapes = nest.pack_sequence_as(ret, flat_shapes) self._output_types = nest.pack_sequence_as(ret, flat_types) _warn_if_collections(transformation_name) return flat_ret self._function = tf_data_structured_function_wrapper if add_to_graph: self._function.add_to_graph(ops.get_default_graph()) else: # Use the private method that will execute # `tf_data_structured_function_wrapper` but delay adding it to the graph # in case (e.g.) we need to rerun the function. self._function._create_definition_if_needed() # pylint: disable=protected-access def _defun_args(self): """Returns a flat list of `tf.DType` for the input element structure.""" ret = [] for input_type, input_class in zip(nest.flatten(self._input_types), nest.flatten(self._input_classes)): # TODO(b/110122868): Add a registration mechanism for new component types. if input_class is sparse_tensor_lib.SparseTensor: ret.append(dtypes.variant) elif isinstance(input_class, _NestedDatasetComponent): if not self._nested_dataset_support: raise NotImplementedError( "The %s transformation does not currently support nested " "datasets as inputs." 
% self._transformation_name) ret.append(dtypes.variant) else: assert isinstance(input_type, dtypes.DType) ret.append(input_type) return ret @property def output_classes(self): return self._output_classes @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types @property def function(self): return self._function def flat_structure(dataset): """Helper for setting `output_shapes` and `output_types` attrs of Dataset ops. Most Dataset op constructors expect `output_shapes` and `output_types` arguments that represent the flattened structure of an element. This helper function generates these attrs as a keyword argument dictionary, allowing `Dataset._as_variant_tensor()` implementations to pass `**flat_structure(self)` to the op constructor. Args: dataset: A `tf.data.Dataset`. Returns: A dictionary of keyword arguments that can be passed to many Dataset op constructors. """ output_classes = [] output_shapes = [] output_types = [] for output_class, output_shape, output_type in zip( nest.flatten(dataset.output_classes), nest.flatten(dataset.output_shapes), nest.flatten(dataset.output_types)): if isinstance(output_class, _NestedDatasetComponent): output_classes.append(output_class.output_classes) output_shapes.append(output_shape.output_shapes) output_types.append(output_type.output_types) else: output_classes.append(output_class) output_shapes.append(output_shape) output_types.append(output_type) output_classes = nest.pack_sequence_as(dataset.output_classes, output_classes) output_shapes = nest.pack_sequence_as(dataset.output_shapes, output_shapes) output_types = nest.pack_sequence_as(dataset.output_types, output_types) return { "output_shapes": nest.flatten(sparse.as_dense_shapes(output_shapes, output_classes)), "output_types": nest.flatten(sparse.as_dense_types(output_types, output_classes)), } class _GeneratorDataset(Dataset): """A `Dataset` that generates elements by invoking a function.""" def 
__init__(self, init_args, init_func, next_func, finalize_func): """Constructs a `_GeneratorDataset`. Args: init_args: A nested structure representing the arguments to `init_func`. init_func: A TensorFlow function that will be called on `init_args` each time a C++ iterator over this dataset is constructed. Returns a nested structure representing the "state" of the dataset. next_func: A TensorFlow function that will be called on the result of `init_func` to produce each element, and that raises `OutOfRangeError` to terminate iteration. finalize_func: A TensorFlow function that will be called on the result of `init_func` immediately before a C++ iterator over this dataset is destroyed. The return value is ignored. """ super(_GeneratorDataset, self).__init__() # These members will be initialized by `tf_init_func`. self._state_classes = None self._state_shapes = None self._state_types = None self._init_args = init_args init_args_classes = sparse.get_classes(init_args) init_args_shapes = nest.pack_sequence_as( init_args, [t.get_shape() for t in nest.flatten(init_args)]) init_args_types = nest.pack_sequence_as( init_args, [t.dtype for t in nest.flatten(init_args)]) wrapped_init_func = StructuredFunctionWrapper( init_func, "GeneratorDataset", input_classes=init_args_classes, input_shapes=init_args_shapes, input_types=init_args_types) self._state_classes = wrapped_init_func.output_classes self._state_shapes = wrapped_init_func.output_shapes self._state_types = wrapped_init_func.output_types self._init_func = wrapped_init_func.function wrapped_next_func = StructuredFunctionWrapper( next_func, "GeneratorDataset", input_classes=self._state_classes, input_shapes=self._state_shapes, input_types=self._state_types) self._output_classes = wrapped_next_func.output_classes self._output_shapes = wrapped_next_func.output_shapes self._output_types = wrapped_next_func.output_types self._next_func = wrapped_next_func.function wrapped_finalize_func = StructuredFunctionWrapper( 
finalize_func, "GeneratorDataset", input_classes=self._state_classes, input_shapes=self._state_shapes, input_types=self._state_types) self._finalize_func = wrapped_finalize_func.function def _as_variant_tensor(self): return gen_dataset_ops.generator_dataset( nest.flatten(self._init_args) + self._init_func.captured_inputs, self._next_func.captured_inputs, self._finalize_func.captured_inputs, init_func=self._init_func, next_func=self._next_func, finalize_func=self._finalize_func, **flat_structure(self)) @property def output_classes(self): return self._output_classes @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types class ZipDataset(Dataset): """A `Dataset` that zips its inputs together.""" def __init__(self, datasets): """See `Dataset.zip()` for details.""" super(ZipDataset, self).__init__() for ds in nest.flatten(datasets): if not isinstance(ds, Dataset): if isinstance(ds, list): message = ("The argument to `Dataset.zip()` must be a nested " "structure of `Dataset` objects. 
Nested structures do not " "support Python lists; please use a tuple instead.") else: message = ("The argument to `Dataset.zip()` must be a nested " "structure of `Dataset` objects.") raise TypeError(message) self._datasets = datasets def _as_variant_tensor(self): # pylint: disable=protected-access return gen_dataset_ops.zip_dataset( [ds._as_variant_tensor() for ds in nest.flatten(self._datasets)], **flat_structure(self)) # pylint: enable=protected-access @property def output_classes(self): return nest.pack_sequence_as( self._datasets, [ds.output_classes for ds in nest.flatten(self._datasets)]) @property def output_shapes(self): return nest.pack_sequence_as( self._datasets, [ds.output_shapes for ds in nest.flatten(self._datasets)]) @property def output_types(self): return nest.pack_sequence_as( self._datasets, [ds.output_types for ds in nest.flatten(self._datasets)]) class ConcatenateDataset(Dataset): """A `Dataset` that concatenates its input with given dataset.""" def __init__(self, input_dataset, dataset_to_concatenate): """See `Dataset.concatenate()` for details.""" super(ConcatenateDataset, self).__init__() self._input_dataset = input_dataset self._dataset_to_concatenate = dataset_to_concatenate if input_dataset.output_types != dataset_to_concatenate.output_types: raise TypeError( "Two datasets to concatenate have different types %s and %s" % (input_dataset.output_types, dataset_to_concatenate.output_types)) if input_dataset.output_classes != dataset_to_concatenate.output_classes: raise TypeError( "Two datasets to concatenate have different classes %s and %s" % (input_dataset.output_classes, dataset_to_concatenate.output_classes)) def _as_variant_tensor(self): # pylint: disable=protected-access return gen_dataset_ops.concatenate_dataset( self._input_dataset._as_variant_tensor(), self._dataset_to_concatenate._as_variant_tensor(), **flat_structure(self)) # pylint: enable=protected-access @property def output_classes(self): return 
self._input_dataset.output_classes @property def output_shapes(self): return nest.pack_sequence_as(self._input_dataset.output_shapes, [ ts1.most_specific_compatible_shape(ts2) for (ts1, ts2) in zip( nest.flatten(self._input_dataset.output_shapes), nest.flatten(self._dataset_to_concatenate.output_shapes)) ]) @property def output_types(self): return self._input_dataset.output_types class RepeatDataset(Dataset): """A `Dataset` that repeats its input several times.""" def __init__(self, input_dataset, count): """See `Dataset.repeat()` for details.""" super(RepeatDataset, self).__init__() self._input_dataset = input_dataset if count is None: self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count") else: self._count = ops.convert_to_tensor( count, dtype=dtypes.int64, name="count") def _as_variant_tensor(self): return gen_dataset_ops.repeat_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access count=self._count, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): return self._input_dataset.output_shapes @property def output_types(self): return self._input_dataset.output_types class RangeDataset(Dataset): """A `Dataset` of a step separated range of values.""" def __init__(self, *args): """See `Dataset.range()` for details.""" super(RangeDataset, self).__init__() self._parse_args(*args) def _parse_args(self, *args): """Parse arguments according to the same rules as the `range()` builtin.""" if len(args) == 1: self._start = self._build_tensor(0, "start") self._stop = self._build_tensor(args[0], "stop") self._step = self._build_tensor(1, "step") elif len(args) == 2: self._start = self._build_tensor(args[0], "start") self._stop = self._build_tensor(args[1], "stop") self._step = self._build_tensor(1, "step") elif len(args) == 3: self._start = self._build_tensor(args[0], "start") self._stop = self._build_tensor(args[1], "stop") self._step = 
self._build_tensor(args[2], "step") else: raise ValueError("Invalid arguments to RangeDataset: %s" % str(args)) def _build_tensor(self, int64_value, name): return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name) def _as_variant_tensor(self): return gen_dataset_ops.range_dataset( start=self._start, stop=self._stop, step=self._step, **flat_structure(self)) @property def output_classes(self): return ops.Tensor @property def output_shapes(self): return tensor_shape.scalar() @property def output_types(self): return dtypes.int64 class CacheDataset(Dataset): """A `Dataset` that caches elements of its input.""" def __init__(self, input_dataset, filename): """See `Dataset.cache()` for details.""" super(CacheDataset, self).__init__() self._input_dataset = input_dataset self._filename = ops.convert_to_tensor( filename, dtype=dtypes.string, name="filename") def _as_variant_tensor(self): return gen_dataset_ops.cache_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access filename=self._filename, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): return self._input_dataset.output_shapes @property def output_types(self): return self._input_dataset.output_types class ShuffleDataset(Dataset): """A `Dataset` that randomly shuffles the elements of its input.""" def __init__(self, input_dataset, buffer_size, seed=None, reshuffle_each_iteration=None): """Randomly shuffles the elements of this dataset. Args: input_dataset: The input dataset. buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements from this dataset from which the new dataset will sample. seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random seed that will be used to create the distribution. See `tf.set_random_seed` for behavior. reshuffle_each_iteration: (Optional.) 
A boolean, which if true indicates that the dataset should be pseudorandomly reshuffled each time it is iterated over. (Defaults to `True`.) Returns: A `Dataset`. Raises: ValueError: if invalid arguments are provided. """ super(ShuffleDataset, self).__init__() self._input_dataset = input_dataset self._buffer_size = ops.convert_to_tensor( buffer_size, dtype=dtypes.int64, name="buffer_size") self._seed, self._seed2 = random_seed.get_seed(seed) if reshuffle_each_iteration is None: self._reshuffle_each_iteration = True else: self._reshuffle_each_iteration = reshuffle_each_iteration def _as_variant_tensor(self): return gen_dataset_ops.shuffle_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access buffer_size=self._buffer_size, seed=self._seed, seed2=self._seed2, reshuffle_each_iteration=self._reshuffle_each_iteration, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): return self._input_dataset.output_shapes @property def output_types(self): return self._input_dataset.output_types class TakeDataset(Dataset): """A `Dataset` containing the first `count` elements from its input.""" def __init__(self, input_dataset, count): """See `Dataset.take()` for details.""" super(TakeDataset, self).__init__() self._input_dataset = input_dataset self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count") def _as_variant_tensor(self): return gen_dataset_ops.take_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access count=self._count, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): return self._input_dataset.output_shapes @property def output_types(self): return self._input_dataset.output_types class SkipDataset(Dataset): """A `Dataset` skipping the first `count` elements from its input.""" def __init__(self, input_dataset, count): 
"""See `Dataset.skip()` for details.""" super(SkipDataset, self).__init__() self._input_dataset = input_dataset self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count") def _as_variant_tensor(self): return gen_dataset_ops.skip_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access count=self._count, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): return self._input_dataset.output_shapes @property def output_types(self): return self._input_dataset.output_types class BatchDataset(Dataset): """A `Dataset` that batches contiguous elements from its input.""" def __init__(self, input_dataset, batch_size, drop_remainder): """See `Dataset.batch()` for details.""" super(BatchDataset, self).__init__() self._input_dataset = input_dataset self._batch_size = ops.convert_to_tensor( batch_size, dtype=dtypes.int64, name="batch_size") self._drop_remainder = ops.convert_to_tensor( drop_remainder, dtype=dtypes.bool, name="drop_remainder") def _as_variant_tensor(self): # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018. if smart_cond.smart_constant_value(self._drop_remainder) is False: return gen_dataset_ops.batch_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access batch_size=self._batch_size, **flat_structure(self)) else: return gen_dataset_ops.batch_dataset_v2( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access batch_size=self._batch_size, drop_remainder=self._drop_remainder, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): input_shapes = self._input_dataset.output_shapes return nest.pack_sequence_as(input_shapes, [ tensor_shape.vector( tensor_util.constant_value(self._batch_size) if smart_cond. 
smart_constant_value(self._drop_remainder) else None).concatenate(s) for s in nest.flatten(self._input_dataset.output_shapes) ]) @property def output_types(self): return self._input_dataset.output_types def _is_padded_shape_compatible_with(padded_shape, input_component_shape): """Returns `True` if `input_component_shape` can be padded to `padded_shape`. Args: padded_shape: A `tf.TensorShape`. input_component_shape: A `tf.TensorShape`. Returns: `True` if `input_component_shape` can be padded to `padded_shape`, otherwise `False`. """ if padded_shape.dims is None or input_component_shape.dims is None: return True if len(padded_shape.dims) != len(input_component_shape.dims): return False for padded_dim, input_dim in zip( padded_shape.dims, input_component_shape.dims): if (padded_dim.value is not None and input_dim.value is not None and padded_dim.value < input_dim.value): return False return True def _padded_shape_to_tensor(padded_shape, input_component_shape): """Converts `padded_shape` to a `tf.Tensor` representing that shape. Args: padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python sequence, or a 1-D `tf.Tensor` of `tf.int64` elements. input_component_shape: A `tf.TensorShape`, with which `padded_shape` must be compatible. Returns: A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`. Raises: ValueError: If `padded_shape` is not a shape or not compatible with `input_component_shape`. TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor. """ try: # Try to convert the `padded_shape` to a `tf.TensorShape` padded_shape_as_shape = tensor_shape.as_shape(padded_shape) # We will return the "canonical" tensor representation, which uses # `-1` in place of `None`. 
ret = ops.convert_to_tensor( [dim if dim is not None else -1 for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64) except (TypeError, ValueError): # The argument was not trivially convertible to a # `tf.TensorShape`, so fall back on the conversion to tensor # machinery. ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64) if ret.shape.dims is not None and len(ret.shape.dims) != 1: raise ValueError( "Padded shape %s must be a 1-D tensor of tf.int64 values, but its " "shape was %s." % (padded_shape, ret.shape)) if ret.dtype != dtypes.int64: raise TypeError( "Padded shape %s must be a 1-D tensor of tf.int64 values, but its " "element type was %s." % (padded_shape, ret.dtype.name)) padded_shape_as_shape = tensor_util.constant_value_as_shape(ret) if not _is_padded_shape_compatible_with(padded_shape_as_shape, input_component_shape): raise ValueError("The padded shape %s is not compatible with the " "corresponding input component shape %s." % (padded_shape_as_shape, input_component_shape)) return ret def _padding_value_to_tensor(value, output_type): """Converts the padding value to a tensor. Args: value: The padding value. output_type: Its expected dtype. Returns: A scalar `Tensor`. Raises: ValueError: if the padding value is not a scalar. TypeError: if the padding value's type does not match `output_type`. 
""" value = ops.convert_to_tensor(value, name="padding_value") if not value.shape.is_compatible_with(tensor_shape.scalar()): raise ValueError("Padding value should be a scalar, but is not: %s" % value) if value.dtype != output_type: raise TypeError("Padding value tensor (%s) does not match output type: %s" % (value, output_type)) return value def _default_padding(input_dataset): """Returns default padding tensors in a structure matching `input_dataset`.""" def make_zero(t): if t.base_dtype == dtypes.string: return "" elif t.base_dtype == dtypes.variant: raise TypeError("Unable to create padding for field of type 'variant'") else: return np.zeros_like(t.as_numpy_dtype()) return nest.map_structure(make_zero, input_dataset.output_types) class PaddedBatchDataset(Dataset): """A `Dataset` that batches and pads contiguous elements from its input.""" def __init__(self, input_dataset, batch_size, padded_shapes, padding_values, drop_remainder): """See `Dataset.batch()` for details.""" super(PaddedBatchDataset, self).__init__() if sparse.any_sparse(input_dataset.output_classes): # TODO(b/63669786): support batching of sparse tensors raise TypeError( "Batching of padded sparse tensors is not currently supported") self._input_dataset = input_dataset self._batch_size = ops.convert_to_tensor( batch_size, dtype=dtypes.int64, name="batch_size") padding_values = ( padding_values if padding_values is not None else _default_padding(input_dataset)) flat_padded_shapes = nest.flatten_up_to(input_dataset.output_shapes, padded_shapes) flat_padded_shapes_as_tensors = [] for input_component_shape, padded_shape in zip( nest.flatten(input_dataset.output_shapes), flat_padded_shapes): flat_padded_shapes_as_tensors.append( _padded_shape_to_tensor(padded_shape, input_component_shape)) self._padded_shapes = nest.pack_sequence_as(input_dataset.output_shapes, flat_padded_shapes_as_tensors) self._padding_values = nest.map_structure_up_to( input_dataset.output_shapes, _padding_value_to_tensor, 
padding_values, input_dataset.output_types) self._drop_remainder = ops.convert_to_tensor( drop_remainder, dtype=dtypes.bool, name="drop_remainder") def _as_variant_tensor(self): # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018. if smart_cond.smart_constant_value(self._drop_remainder) is False: return gen_dataset_ops.padded_batch_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access batch_size=self._batch_size, padded_shapes=[ ops.convert_to_tensor(s, dtype=dtypes.int64) for s in nest.flatten(self._padded_shapes) ], padding_values=nest.flatten(self._padding_values), output_shapes=nest.flatten( sparse.as_dense_shapes(self.output_shapes, self.output_classes))) else: return gen_dataset_ops.padded_batch_dataset_v2( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access batch_size=self._batch_size, padded_shapes=[ ops.convert_to_tensor(s, dtype=dtypes.int64) for s in nest.flatten(self._padded_shapes) ], padding_values=nest.flatten(self._padding_values), drop_remainder=self._drop_remainder, output_shapes=nest.flatten( sparse.as_dense_shapes(self.output_shapes, self.output_classes))) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): def _padded_shape_to_batch_shape(s): return tensor_shape.vector( tensor_util.constant_value(self._batch_size) if smart_cond. smart_constant_value(self._drop_remainder) else None).concatenate( tensor_util.constant_value_as_shape(s)) return nest.map_structure(_padded_shape_to_batch_shape, self._padded_shapes) @property def output_types(self): return self._input_dataset.output_types def _should_unpack_args(args): """Returns `True` if `args` should be `*args` when passed to a callable.""" return type(args) is tuple # pylint: disable=unidiomatic-typecheck def _warn_if_collections(transformation_name): """Prints warning message if the current graph uses common graph collections. 
NOTE(mrry): Currently a warning is only generated for lookup tables. Any variables created will be automatically hoisted out to the outermost scope using `init_scope()`. Some collections (such as for control-flow contexts) are benign and should not generate a warning. Args: transformation_name: A human-readable name for the transformation. """ if ops.get_default_graph().get_collection(ops.GraphKeys.TABLE_INITIALIZERS): warnings.warn("Creating lookup tables inside a function passed to %s is not" " supported. Create each table outside the function, and " "capture it inside the function to use it." % transformation_name) class MapDataset(Dataset): """A `Dataset` that maps a function over elements in its input.""" def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True): """See `Dataset.map()` for details.""" super(MapDataset, self).__init__() self._input_dataset = input_dataset self._use_inter_op_parallelism = use_inter_op_parallelism wrapped_func = StructuredFunctionWrapper( map_func, "Dataset.map()", input_dataset) self._output_classes = wrapped_func.output_classes self._output_shapes = wrapped_func.output_shapes self._output_types = wrapped_func.output_types self._map_func = wrapped_func.function def _as_variant_tensor(self): input_t = self._input_dataset._as_variant_tensor() # pylint: disable=protected-access return gen_dataset_ops.map_dataset( input_t, self._map_func.captured_inputs, f=self._map_func, use_inter_op_parallelism=self._use_inter_op_parallelism, **flat_structure(self)) @property def output_classes(self): return self._output_classes @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types class ParallelMapDataset(MapDataset): """A `Dataset` that maps a function over elements in its input in parallel.""" def __init__(self, input_dataset, map_func, num_parallel_calls, use_inter_op_parallelism=True): """See `Dataset.map()` for details.""" super(ParallelMapDataset, 
self).__init__(input_dataset, map_func, use_inter_op_parallelism) self._num_parallel_calls = ops.convert_to_tensor( num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls") def _as_variant_tensor(self): input_t = self._input_dataset._as_variant_tensor() # pylint: disable=protected-access # pylint: disable=protected-access return gen_dataset_ops.parallel_map_dataset( input_t, self._map_func.captured_inputs, f=self._map_func, num_parallel_calls=self._num_parallel_calls, use_inter_op_parallelism=self._use_inter_op_parallelism, **flat_structure(self)) # pylint: enable=protected-access class FlatMapDataset(Dataset): """A `Dataset` that maps a function over its input and flattens the result.""" def __init__(self, input_dataset, map_func): """See `Dataset.flat_map()` for details.""" super(FlatMapDataset, self).__init__() self._input_dataset = input_dataset wrapped_func = StructuredFunctionWrapper( map_func, self._transformation_name(), input_dataset, experimental_nested_dataset_support=True) if not isinstance(wrapped_func.output_classes, _NestedDatasetComponent): raise TypeError("`map_func` must return a `Dataset` object.") self._output_classes = wrapped_func.output_classes.output_classes self._output_types = wrapped_func.output_types.output_types self._output_shapes = wrapped_func.output_shapes.output_shapes self._map_func = wrapped_func.function def _as_variant_tensor(self): return gen_dataset_ops.flat_map_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access self._map_func.captured_inputs, f=self._map_func, **flat_structure(self)) @property def output_classes(self): return self._output_classes @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types def _transformation_name(self): return "Dataset.flat_map()" class InterleaveDataset(FlatMapDataset): """A `Dataset` that maps a function over its input and interleaves the result. 
""" def __init__(self, input_dataset, map_func, cycle_length, block_length): """See `Dataset.interleave()` for details.""" super(InterleaveDataset, self).__init__(input_dataset, map_func) self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") def _as_variant_tensor(self): return gen_dataset_ops.interleave_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access self._map_func.captured_inputs, # pylint: disable=protected-access self._cycle_length, self._block_length, f=self._map_func, # pylint: disable=protected-access **flat_structure(self)) def _transformation_name(self): return "Dataset.interleave()" class ParallelInterleaveDataset(FlatMapDataset): """A `Dataset` that maps a function over its input and interleaves the result. """ def __init__(self, input_dataset, map_func, cycle_length, block_length, num_parallel_calls): """See `Dataset.interleave()` for details.""" super(ParallelInterleaveDataset, self).__init__(input_dataset, map_func) self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") self._num_parallel_calls = ops.convert_to_tensor( num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls") def _as_variant_tensor(self): return gen_dataset_ops.parallel_interleave_dataset_v2( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access self._map_func.captured_inputs, # pylint: disable=protected-access self._cycle_length, self._block_length, self._num_parallel_calls, f=self._map_func, # pylint: disable=protected-access **flat_structure(self)) def _transformation_name(self): return "Dataset.interleave()" class FilterDataset(Dataset): """A `Dataset` that filters its input according to a predicate function.""" def __init__(self, 
input_dataset, predicate): """See `Dataset.filter()` for details.""" super(FilterDataset, self).__init__() self._input_dataset = input_dataset wrapped_func = StructuredFunctionWrapper( predicate, "Dataset.filter()", input_dataset) if not ( wrapped_func.output_types == dtypes.bool and wrapped_func.output_shapes.is_compatible_with(tensor_shape.scalar())): raise ValueError("`predicate` must return a scalar boolean tensor.") self._predicate = wrapped_func.function def _as_variant_tensor(self): return gen_dataset_ops.filter_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access other_arguments=self._predicate.captured_inputs, predicate=self._predicate, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): return self._input_dataset.output_shapes @property def output_types(self): return self._input_dataset.output_types class PrefetchDataset(Dataset): """A `Dataset` that asynchronously prefetches its input.""" def __init__(self, input_dataset, buffer_size): """See `Dataset.prefetch()` for details.""" super(PrefetchDataset, self).__init__() self._input_dataset = input_dataset if buffer_size is None: buffer_size = -1 # This is the sentinel for auto-tuning. 
self._buffer_size = ops.convert_to_tensor( buffer_size, dtype=dtypes.int64, name="buffer_size") def _as_variant_tensor(self): return gen_dataset_ops.prefetch_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access buffer_size=self._buffer_size, **flat_structure(self)) @property def output_classes(self): return self._input_dataset.output_classes @property def output_shapes(self): return self._input_dataset.output_shapes @property def output_types(self): return self._input_dataset.output_types class WindowDataset(Dataset): """A dataset that creates window datasets from the input elements.""" def __init__(self, input_dataset, size, shift, stride, drop_remainder): """See `window_dataset()` for more details.""" super(WindowDataset, self).__init__() self._input_dataset = input_dataset self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size") self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift") self._stride = ops.convert_to_tensor( stride, dtype=dtypes.int64, name="stride") self._drop_remainder = ops.convert_to_tensor( drop_remainder, dtype=dtypes.bool, name="drop_remainder") self._output_classes = nest.pack_sequence_as( input_dataset.output_classes, [ _NestedDatasetComponent( # pylint: disable=protected-access output_classes=output_class, output_shapes=output_shape, output_types=output_type) for output_class, output_shape, output_type in zip( nest.flatten(input_dataset.output_classes), nest.flatten(input_dataset.output_shapes), nest.flatten(input_dataset.output_types)) ]) self._output_shapes = self._output_classes self._output_types = self._output_classes def _as_variant_tensor(self): return gen_dataset_ops.window_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access self._size, self._shift, self._stride, self._drop_remainder, **flat_structure(self)) @property def output_classes(self): return self._output_classes @property def output_shapes(self): return 
self._output_shapes @property def output_types(self): return self._output_types
{ "content_hash": "5e992a58c80d8c35d208d41170229a7e", "timestamp": "", "source": "github", "line_count": 2519, "max_line_length": 99, "avg_line_length": 37.85907105994442, "alnum_prop": 0.6609833590235616, "repo_name": "xodus7/tensorflow", "id": "93b3a7b93b3093e3322e585415c9508d17535df6", "size": "96056", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow/python/data/ops/dataset_ops.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "1286" }, { "name": "Batchfile", "bytes": "9258" }, { "name": "C", "bytes": "340946" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "48861698" }, { "name": "CMake", "bytes": "195699" }, { "name": "Dockerfile", "bytes": "36400" }, { "name": "Go", "bytes": "1240309" }, { "name": "HTML", "bytes": "4681865" }, { "name": "Java", "bytes": "834061" }, { "name": "Jupyter Notebook", "bytes": "2604756" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "52618" }, { "name": "Objective-C", "bytes": "15650" }, { "name": "Objective-C++", "bytes": "99243" }, { "name": "PHP", "bytes": "1357" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "40952138" }, { "name": "Ruby", "bytes": "553" }, { "name": "Shell", "bytes": "459258" }, { "name": "Smarty", "bytes": "6976" } ], "symlink_target": "" }
import _plotly_utils.basevalidators


class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``bordercolorsrc`` attribute of
    ``sankey.node.hoverlabel``."""

    def __init__(
        self, plotly_name="bordercolorsrc", parent_name="sankey.node.hoverlabel", **kwargs
    ):
        # Fill in the defaults only when the caller did not supply them,
        # then forward everything to the generic source-attribute validator.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super().__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
{ "content_hash": "405d847e08ba03fc1925a490623041a4", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 73, "avg_line_length": 30.11764705882353, "alnum_prop": 0.583984375, "repo_name": "plotly/python-api", "id": "39073b69a7e958d433b1a61447bfd1f0e2bdb501", "size": "512", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "packages/python/plotly/plotly/validators/sankey/node/hoverlabel/_bordercolorsrc.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "6870" }, { "name": "Makefile", "bytes": "1708" }, { "name": "Python", "bytes": "823245" }, { "name": "Shell", "bytes": "3238" } ], "symlink_target": "" }
""" This program executes SubDyn and a regression test for a single test case. The test data is contained in a git submodule, r-test, which must be initialized prior to running. See the r-test README or OpenFAST documentation for more info. Get usage with: `executeSubdynRegressionCase.py -h` """ import os import sys basepath = os.path.sep.join(sys.argv[0].split(os.path.sep)[:-1]) if os.path.sep in sys.argv[0] else "." sys.path.insert(0, os.path.sep.join([basepath, "lib"])) import argparse import shutil import glob import subprocess import rtestlib as rtl import openfastDrivers import pass_fail from errorPlotting import exportCaseSummary ##### Main program ### Store the python executable for future python calls pythonCommand = sys.executable ### Verify input arguments parser = argparse.ArgumentParser(description="Executes SubDyn and a regression test for a single test case.") parser.add_argument("caseName", metavar="Case-Name", type=str, nargs=1, help="The name of the test case.") parser.add_argument("executable", metavar="SubDyn-Driver", type=str, nargs=1, help="The path to the SubDyn driver executable.") parser.add_argument("sourceDirectory", metavar="path/to/openfast_repo", type=str, nargs=1, help="The path to the OpenFAST repository.") parser.add_argument("buildDirectory", metavar="path/to/openfast_repo/build", type=str, nargs=1, help="The path to the OpenFAST repository build directory.") parser.add_argument("tolerance", metavar="Test-Tolerance", type=float, nargs=1, help="Tolerance defining pass or failure in the regression test.") parser.add_argument("systemName", metavar="System-Name", type=str, nargs=1, help="The current system\'s name: [Darwin,Linux,Windows]") parser.add_argument("compilerId", metavar="Compiler-Id", type=str, nargs=1, help="The compiler\'s id: [Intel,GNU]") parser.add_argument("-p", "-plot", dest="plot", default=False, metavar="Plotting-Flag", type=bool, nargs="?", help="bool to include matplotlib plots in failed cases") 
parser.add_argument("-n", "-no-exec", dest="noExec", default=False, metavar="No-Execution", type=bool, nargs="?", help="bool to prevent execution of the test cases") parser.add_argument("-v", "-verbose", dest="verbose", default=False, metavar="Verbose-Flag", type=bool, nargs="?", help="bool to include verbose system output") args = parser.parse_args() caseName = args.caseName[0] executable = args.executable[0] sourceDirectory = args.sourceDirectory[0] buildDirectory = args.buildDirectory[0] tolerance = args.tolerance[0] plotError = args.plot if args.plot is False else True noExec = args.noExec if args.noExec is False else True verbose = args.verbose if args.verbose is False else True # validate inputs rtl.validateExeOrExit(executable) rtl.validateDirOrExit(sourceDirectory) if not os.path.isdir(buildDirectory): os.makedirs(buildDirectory) ### Build the filesystem navigation variables for running the test case regtests = os.path.join(sourceDirectory, "reg_tests") lib = os.path.join(regtests, "lib") rtest = os.path.join(regtests, "r-test") moduleDirectory = os.path.join(rtest, "modules", "subdyn") inputsDirectory = os.path.join(moduleDirectory, caseName) targetOutputDirectory = os.path.join(inputsDirectory) testBuildDirectory = os.path.join(buildDirectory, caseName) # verify all the required directories exist if not os.path.isdir(rtest): rtl.exitWithError("The test data directory, {}, does not exist. If you haven't already, run `git submodule update --init --recursive`".format(rtest)) if not os.path.isdir(targetOutputDirectory): rtl.exitWithError("The test data outputs directory, {}, does not exist. Try running `git submodule update`".format(targetOutputDirectory)) if not os.path.isdir(inputsDirectory): rtl.exitWithError("The test data inputs directory, {}, does not exist. 
Verify your local repository is up to date.".format(inputsDirectory)) # create the local output directory if it does not already exist # and initialize it with input files for all test cases if not os.path.isdir(testBuildDirectory): os.makedirs(testBuildDirectory) for file in glob.glob(os.path.join(inputsDirectory,caseName+".dvr")): filename = file.split(os.path.sep)[-1] shutil.copy(os.path.join(inputsDirectory,filename), os.path.join(testBuildDirectory,filename)) for file in glob.glob(os.path.join(inputsDirectory,"*dat")): filename = file.split(os.path.sep)[-1] shutil.copy(os.path.join(inputsDirectory,filename), os.path.join(testBuildDirectory,filename)) ### Run SubDyn on the test case if not noExec: caseInputFile = os.path.join(testBuildDirectory, caseName+".dvr") returnCode = openfastDrivers.runSubdynDriverCase(caseInputFile, executable) if returnCode != 0: rtl.exitWithError("") ### Build the filesystem navigation variables for running the regression test localOutFile = os.path.join(testBuildDirectory, caseName+".SD.out") baselineOutFile = os.path.join(targetOutputDirectory, caseName+".SD.out") rtl.validateFileOrExit(localOutFile) rtl.validateFileOrExit(baselineOutFile) testData, testInfo, testPack = pass_fail.readFASTOut(localOutFile) baselineData, baselineInfo, _ = pass_fail.readFASTOut(baselineOutFile) performance = pass_fail.calculateNorms(testData, baselineData) normalizedNorm = performance[:, 1] # export all case summaries results = list(zip(testInfo["attribute_names"], [*performance])) results_max = performance.max(axis=0) exportCaseSummary(testBuildDirectory, caseName, results, results_max, tolerance) # failing case if not pass_fail.passRegressionTest(normalizedNorm, tolerance): if plotError: from errorPlotting import finalizePlotDirectory, plotOpenfastError ixFailChannels = [i for i in range(len(testInfo["attribute_names"])) if normalizedNorm[i] > tolerance] failChannels = [channel for i, channel in enumerate(testInfo["attribute_names"]) if i in 
ixFailChannels] failResults = [res for i, res in enumerate(results) if i in ixFailChannels] for channel in failChannels: try: plotOpenfastError(localOutFile, baselineOutFile, channel) except: error = sys.exc_info()[1] print("Error generating plots: {}".format(error.msg)) finalizePlotDirectory(localOutFile, failChannels, caseName) sys.exit(1) # passing case sys.exit(0)
{ "content_hash": "c677f0ad9eb8a6d7cfaa7c6bcde484dd", "timestamp": "", "source": "github", "line_count": 125, "max_line_length": 166, "avg_line_length": 51.248, "alnum_prop": 0.7461754605057759, "repo_name": "OpenFAST/OpenFAST", "id": "151049fa4ee05767b64c64fc3fbd8bf39be92169", "size": "7011", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "reg_tests/executeSubdynRegressionCase.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1216" }, { "name": "C", "bytes": "1074920" }, { "name": "C++", "bytes": "1082869" }, { "name": "CMake", "bytes": "35507" }, { "name": "Fortran", "bytes": "10393056" }, { "name": "Makefile", "bytes": "26228" }, { "name": "Matlab", "bytes": "3377" }, { "name": "Objective-C", "bytes": "15882" } ], "symlink_target": "" }