Columns: _id (string, 2-7 chars), title (string, 1-88 chars), partition (string, 3 classes), text (string, 75-19.8k chars), language (string, 1 class), meta_information (dict)
q24600
IRGenerator._inject_patched_examples
train
def _inject_patched_examples(self, existing_item, patched_item): """Injects patched examples into original examples.""" for key, _ in patched_item.examples.items(): patched_example = patched_item.examples[key] existing_examples = existing_item.examples if key in existing_examples: existing_examples[key].fields.update(patched_example.fields) else: error_msg = 'Example defined in patch {} must correspond to a pre-existing example.' raise InvalidSpec(error_msg.format( quote(patched_item.name)), patched_example.lineno, patched_example.path)
python
{ "resource": "" }
q24601
IRGenerator._populate_type_attributes
train
def _populate_type_attributes(self): """ Converts each struct, union, and route from a forward reference to a full definition. """ for namespace in self.api.namespaces.values(): env = self._get_or_create_env(namespace.name) # do annotations before everything else, since populating aliases # and datatypes involves setting annotations for annotation in namespace.annotations: if isinstance(annotation, CustomAnnotation): loc = annotation._ast_node.lineno, annotation._ast_node.path if annotation.annotation_type_ns: if annotation.annotation_type_ns not in env: raise InvalidSpec( 'Namespace %s is not imported' % quote(annotation.annotation_type_ns), *loc) annotation_type_env = env[annotation.annotation_type_ns] if not isinstance(annotation_type_env, Environment): raise InvalidSpec( '%s is not a namespace.' % quote(annotation.annotation_type_ns), *loc) else: annotation_type_env = env if annotation.annotation_type_name not in annotation_type_env: raise InvalidSpec( 'Annotation type %s does not exist' % quote(annotation.annotation_type_name), *loc) annotation_type = annotation_type_env[annotation.annotation_type_name] if not isinstance(annotation_type, AnnotationType): raise InvalidSpec( '%s is not an annotation type' % quote(annotation.annotation_type_name), *loc ) annotation.set_attributes(annotation_type) for alias in namespace.aliases: data_type = self._resolve_type(env, alias._ast_node.type_ref) alias.set_attributes(alias._ast_node.doc, data_type) annotations = [self._resolve_annotation_type(env, annotation) for annotation in alias._ast_node.annotations] alias.set_annotations(annotations) for data_type in namespace.data_types: if not data_type._is_forward_ref: continue self._resolution_in_progress.add(data_type) if isinstance(data_type, Struct): self._populate_struct_type_attributes(env, data_type) elif isinstance(data_type, Union): self._populate_union_type_attributes(env, data_type) else: raise AssertionError('Unhandled type: %r' % type(data_type)) self._resolution_in_progress.remove(data_type) assert len(self._resolution_in_progress) == 0
python
{ "resource": "" }
q24602
IRGenerator._populate_struct_type_attributes
train
def _populate_struct_type_attributes(self, env, data_type): """ Converts a forward reference of a struct into a complete definition. """ parent_type = None extends = data_type._ast_node.extends if extends: # A parent type must be fully defined and not just a forward # reference. parent_type = self._resolve_type(env, extends, True) if isinstance(parent_type, Alias): # Restrict extending aliases because it's difficult to generate # code for it in Python. We put all type references at the end # to avoid out-of-order declaration issues, but using "extends" # in Python forces the reference to happen earlier. raise InvalidSpec( 'A struct cannot extend an alias. ' 'Use the canonical name instead.', data_type._ast_node.lineno, data_type._ast_node.path) if isinstance(parent_type, Nullable): raise InvalidSpec( 'A struct cannot extend a nullable type.', data_type._ast_node.lineno, data_type._ast_node.path) if not isinstance(parent_type, Struct): raise InvalidSpec( 'A struct can only extend another struct: ' '%s is not a struct.' % quote(parent_type.name), data_type._ast_node.lineno, data_type._ast_node.path) api_type_fields = [] for stone_field in data_type._ast_node.fields: api_type_field = self._create_struct_field(env, stone_field) api_type_fields.append(api_type_field) data_type.set_attributes( data_type._ast_node.doc, api_type_fields, parent_type)
python
{ "resource": "" }
q24603
IRGenerator._populate_union_type_attributes
train
def _populate_union_type_attributes(self, env, data_type): """ Converts a forward reference of a union into a complete definition. """ parent_type = None extends = data_type._ast_node.extends if extends: # A parent type must be fully defined and not just a forward # reference. parent_type = self._resolve_type(env, extends, True) if isinstance(parent_type, Alias): raise InvalidSpec( 'A union cannot extend an alias. ' 'Use the canonical name instead.', data_type._ast_node.lineno, data_type._ast_node.path) if isinstance(parent_type, Nullable): raise InvalidSpec( 'A union cannot extend a nullable type.', data_type._ast_node.lineno, data_type._ast_node.path) if not isinstance(parent_type, Union): raise InvalidSpec( 'A union can only extend another union: ' '%s is not a union.' % quote(parent_type.name), data_type._ast_node.lineno, data_type._ast_node.path) api_type_fields = [] for stone_field in data_type._ast_node.fields: if stone_field.name == 'other': raise InvalidSpec( "Union cannot define an 'other' field because it is " "reserved as the catch-all field for open unions.", stone_field.lineno, stone_field.path) api_type_fields.append(self._create_union_field(env, stone_field)) catch_all_field = None if data_type.closed: if parent_type and not parent_type.closed: # Due to the reversed super type / child type relationship for # unions, a child type cannot be closed if its parent is open # because the parent now has an extra field that is not # recognized by the child if it were substituted in for it. raise InvalidSpec( "Union cannot be closed since parent type '%s' is open." % ( parent_type.name), data_type._ast_node.lineno, data_type._ast_node.path) else: if not parent_type or parent_type.closed: # Create a catch-all field catch_all_field = UnionField( name='other', data_type=Void(), doc=None, ast_node=data_type._ast_node, catch_all=True) api_type_fields.append(catch_all_field) data_type.set_attributes( data_type._ast_node.doc, api_type_fields, parent_type, catch_all_field)
python
{ "resource": "" }
q24604
IRGenerator._populate_field_defaults
train
def _populate_field_defaults(self): """ Populate the defaults of each field. This is done in a separate pass because defaults that specify a union tag require the union to have been defined. """ for namespace in self.api.namespaces.values(): for data_type in namespace.data_types: # Only struct fields can have default if not isinstance(data_type, Struct): continue for field in data_type.fields: if not field._ast_node.has_default: continue if isinstance(field._ast_node.default, AstTagRef): default_value = TagRef( field.data_type, field._ast_node.default.tag) else: default_value = field._ast_node.default if not (field._ast_node.type_ref.nullable and default_value is None): # Verify that the type of the default value is correct for this field try: if field.data_type.name in ('Float32', 'Float64'): # You can assign int to the default value of float type # However float type should always have default value in float default_value = float(default_value) field.data_type.check(default_value) except ValueError as e: raise InvalidSpec( 'Field %s has an invalid default: %s' % (quote(field._ast_node.name), e), field._ast_node.lineno, field._ast_node.path) field.set_default(default_value)
python
{ "resource": "" }
q24605
IRGenerator._populate_route_attributes
train
def _populate_route_attributes(self): """ Converts all routes from forward references to complete definitions. """ route_schema = self._validate_stone_cfg() self.api.add_route_schema(route_schema) for namespace in self.api.namespaces.values(): env = self._get_or_create_env(namespace.name) for route in namespace.routes: self._populate_route_attributes_helper(env, route, route_schema)
python
{ "resource": "" }
q24606
IRGenerator._populate_route_attributes_helper
train
def _populate_route_attributes_helper(self, env, route, schema): """ Converts a single forward reference of a route into a complete definition. """ arg_dt = self._resolve_type(env, route._ast_node.arg_type_ref) result_dt = self._resolve_type(env, route._ast_node.result_type_ref) error_dt = self._resolve_type(env, route._ast_node.error_type_ref) ast_deprecated = route._ast_node.deprecated if ast_deprecated: assert ast_deprecated[0] new_route_name = ast_deprecated[1] new_route_version = ast_deprecated[2] if new_route_name: assert new_route_version is_not_defined = False is_not_route = False if new_route_name in env: if isinstance(env[new_route_name], ApiRoutesByVersion): if new_route_version not in env[new_route_name].at_version: is_not_defined = True else: is_not_route = True else: is_not_defined = True if is_not_defined: raise InvalidSpec( 'Undefined route %s at version %d.' % ( quote(new_route_name), new_route_version), route._ast_node.lineno, route._ast_node.path) if is_not_route: raise InvalidSpec( '%s must be a route.' % quote(new_route_name), route._ast_node.lineno, route._ast_node.path) new_route = env[new_route_name].at_version[new_route_version] deprecated = DeprecationInfo(new_route) else: deprecated = DeprecationInfo() else: deprecated = None attr_by_name = {} for attr in route._ast_node.attrs: attr_by_name[attr.name] = attr try: validated_attrs = schema.check_attr_repr(attr_by_name) except KeyError as e: raise InvalidSpec( "Route does not define attr key '%s'." % e.args[0], route._ast_node.lineno, route._ast_node.path) route.set_attributes( deprecated=deprecated, doc=route._ast_node.doc, arg_data_type=arg_dt, result_data_type=result_dt, error_data_type=error_dt, attrs=validated_attrs)
python
{ "resource": "" }
q24607
IRGenerator._instantiate_data_type
train
def _instantiate_data_type(self, data_type_class, data_type_args, loc): """ Responsible for instantiating a data type with additional attributes. This method ensures that the specified attributes are valid. Args: data_type_class (DataType): The class to instantiate. data_type_attrs (dict): A map from str -> values of attributes. These will be passed into the constructor of data_type_class as keyword arguments. Returns: stone.data_type.DataType: A parameterized instance. """ assert issubclass(data_type_class, DataType), \ 'Expected stone.data_type.DataType, got %r' % data_type_class argspec = inspect.getargspec(data_type_class.__init__) # noqa: E501 # pylint: disable=deprecated-method,useless-suppression argspec.args.remove('self') num_args = len(argspec.args) # Unfortunately, argspec.defaults is None if there are no defaults num_defaults = len(argspec.defaults or ()) pos_args, kw_args = data_type_args if (num_args - num_defaults) > len(pos_args): # Report if a positional argument is missing raise InvalidSpec( 'Missing positional argument %s for %s type' % (quote(argspec.args[len(pos_args)]), quote(data_type_class.__name__)), *loc) elif (num_args - num_defaults) < len(pos_args): # Report if there are too many positional arguments raise InvalidSpec( 'Too many positional arguments for %s type' % quote(data_type_class.__name__), *loc) # Map from arg name to bool indicating whether the arg has a default args = {} for i, key in enumerate(argspec.args): args[key] = (i >= num_args - num_defaults) for key in kw_args: # Report any unknown keyword arguments if key not in args: raise InvalidSpec('Unknown argument %s to %s type.' % (quote(key), quote(data_type_class.__name__)), *loc) # Report any positional args that are defined as keywords args. if not args[key]: raise InvalidSpec( 'Positional argument %s cannot be specified as a ' 'keyword argument.' % quote(key), *loc) del args[key] try: return data_type_class(*pos_args, **kw_args) except ParameterError as e: # Each data type validates its own attributes, and will raise a # ParameterError if the type or value is bad. raise InvalidSpec('Bad argument to %s type: %s' % (quote(data_type_class.__name__), e.args[0]), *loc)
python
{ "resource": "" }
q24608
IRGenerator._resolve_type
train
def _resolve_type(self, env, type_ref, enforce_fully_defined=False): """ Resolves the data type referenced by type_ref. If `enforce_fully_defined` is True, then the referenced type must be fully populated (fields, parent_type, ...), and not simply a forward reference. """ loc = type_ref.lineno, type_ref.path orig_namespace_name = env.namespace_name if type_ref.ns: # TODO(kelkabany): If a spec file imports a namespace, it is # available to all spec files that are part of the same namespace. # Might want to introduce the concept of an environment specific # to a file. if type_ref.ns not in env: raise InvalidSpec( 'Namespace %s is not imported' % quote(type_ref.ns), *loc) env = env[type_ref.ns] if not isinstance(env, Environment): raise InvalidSpec( '%s is not a namespace.' % quote(type_ref.ns), *loc) if type_ref.name not in env: raise InvalidSpec( 'Symbol %s is undefined.' % quote(type_ref.name), *loc) obj = env[type_ref.name] if obj is Void and type_ref.nullable: raise InvalidSpec('Void cannot be marked nullable.', *loc) elif inspect.isclass(obj): resolved_data_type_args = self._resolve_args(env, type_ref.args) data_type = self._instantiate_data_type( obj, resolved_data_type_args, (type_ref.lineno, type_ref.path)) elif isinstance(obj, ApiRoutesByVersion): raise InvalidSpec('A route cannot be referenced here.', *loc) elif type_ref.args[0] or type_ref.args[1]: # An instance of a type cannot have any additional # attributes specified. raise InvalidSpec('Attributes cannot be specified for ' 'instantiated type %s.' % quote(type_ref.name), *loc) else: data_type = env[type_ref.name] if type_ref.ns: # Add the source namespace as an import. namespace = self.api.ensure_namespace(orig_namespace_name) if isinstance(data_type, UserDefined): namespace.add_imported_namespace( self.api.ensure_namespace(type_ref.ns), imported_data_type=True) elif isinstance(data_type, Alias): namespace.add_imported_namespace( self.api.ensure_namespace(type_ref.ns), imported_alias=True) if (enforce_fully_defined and isinstance(data_type, UserDefined) and data_type._is_forward_ref): if data_type in self._resolution_in_progress: raise InvalidSpec( 'Unresolvable circular reference for type %s.' % quote(type_ref.name), *loc) self._resolution_in_progress.add(data_type) if isinstance(data_type, Struct): self._populate_struct_type_attributes(env, data_type) elif isinstance(data_type, Union): self._populate_union_type_attributes(env, data_type) self._resolution_in_progress.remove(data_type) if type_ref.nullable: unwrapped_dt, _ = unwrap_aliases(data_type) if isinstance(unwrapped_dt, Nullable): raise InvalidSpec( 'Cannot mark reference to nullable type as nullable.', *loc) data_type = Nullable(data_type) return data_type
python
{ "resource": "" }
q24609
IRGenerator._resolve_annotation_type
train
def _resolve_annotation_type(self, env, annotation_ref): """ Resolves the annotation type referenced by annotation_ref. """ loc = annotation_ref.lineno, annotation_ref.path if annotation_ref.ns: if annotation_ref.ns not in env: raise InvalidSpec( 'Namespace %s is not imported' % quote(annotation_ref.ns), *loc) env = env[annotation_ref.ns] if not isinstance(env, Environment): raise InvalidSpec( '%s is not a namespace.' % quote(annotation_ref.ns), *loc) if annotation_ref.annotation not in env: raise InvalidSpec( 'Annotation %s does not exist.' % quote(annotation_ref.annotation), *loc) return env[annotation_ref.annotation]
python
{ "resource": "" }
q24610
IRGenerator._resolve_args
train
def _resolve_args(self, env, args): """ Resolves type references in data type arguments to data types in the environment. """ pos_args, kw_args = args def check_value(v): if isinstance(v, AstTypeRef): return self._resolve_type(env, v) else: return v new_pos_args = [check_value(pos_arg) for pos_arg in pos_args] new_kw_args = {k: check_value(v) for k, v in kw_args.items()} return new_pos_args, new_kw_args
python
{ "resource": "" }
q24611
IRGenerator._create_route
train
def _create_route(self, env, item): """ Constructs a route and adds it to the environment. Args: env (dict): The environment of defined symbols. A new key is added corresponding to the name of this new route. item (AstRouteDef): Raw route definition from the parser. Returns: stone.api.ApiRoutesByVersion: A group of fully-defined routes indexed by versions. """ if item.name in env: if isinstance(env[item.name], ApiRoutesByVersion): if item.version in env[item.name].at_version: existing_dt = env[item.name].at_version[item.version] raise InvalidSpec( 'Route %s at version %d already defined (%s:%d).' % ( quote(item.name), item.version, existing_dt._ast_node.path, existing_dt._ast_node.lineno), item.lineno, item.path) else: existing_dt = env[item.name] raise InvalidSpec( 'Symbol %s already defined (%s:%d).' % ( quote(item.name), existing_dt._ast_node.path, existing_dt._ast_node.lineno), item.lineno, item.path) else: env[item.name] = ApiRoutesByVersion() route = ApiRoute( name=item.name, version=item.version, ast_node=item, ) env[route.name].at_version[route.version] = route return route
python
{ "resource": "" }
q24612
IRGenerator._populate_examples
train
def _populate_examples(self): """Construct every possible example for every type. This is done in two passes. The first pass assigns examples to their associated types, but does not resolve references between examples for different types. This is because the referenced examples may not yet exist. The second pass resolves references. """ for namespace in self.api.namespaces.values(): for data_type in namespace.data_types: for example in data_type._ast_node.examples.values(): data_type._add_example(example) for namespace in self.api.namespaces.values(): for data_type in namespace.data_types: data_type._compute_examples()
python
{ "resource": "" }
q24613
IRGenerator._validate_doc_refs
train
def _validate_doc_refs(self): """ Validates that all the documentation references across every docstring in every spec are formatted properly, have valid values, and make references to valid symbols. """ for namespace in self.api.namespaces.values(): env = self._get_or_create_env(namespace.name) # Validate the doc refs of each api entity that has a doc for data_type in namespace.data_types: if data_type.doc: self._validate_doc_refs_helper( env, data_type.doc, (data_type._ast_node.lineno + 1, data_type._ast_node.path), data_type) for field in data_type.fields: if field.doc: self._validate_doc_refs_helper( env, field.doc, (field._ast_node.lineno + 1, field._ast_node.path), data_type) for route in namespace.routes: if route.doc: self._validate_doc_refs_helper( env, route.doc, (route._ast_node.lineno + 1, route._ast_node.path))
python
{ "resource": "" }
q24614
IRGenerator._validate_annotations
train
def _validate_annotations(self): """ Validates that all annotations are attached to proper types and that no field has conflicting inherited or direct annotations. We need to go through all reference chains to make sure we don't override a redactor set on a parent alias or type """ for namespace in self.api.namespaces.values(): for data_type in namespace.data_types: for field in data_type.fields: if field.redactor: self._validate_field_can_be_tagged_with_redactor(field) for alias in namespace.aliases: if alias.redactor: self._validate_object_can_be_tagged_with_redactor(alias)
python
{ "resource": "" }
q24615
IRGenerator._validate_field_can_be_tagged_with_redactor
train
def _validate_field_can_be_tagged_with_redactor(self, field): """ Validates that the field type can be annotated and that alias does not have conflicting annotations. """ if is_alias(field.data_type): raise InvalidSpec( "Redactors can only be applied to alias definitions, not " "to alias references.", field._ast_node.lineno, field._ast_node.path) self._validate_object_can_be_tagged_with_redactor(field)
python
{ "resource": "" }
q24616
IRGenerator._validate_object_can_be_tagged_with_redactor
train
def _validate_object_can_be_tagged_with_redactor(self, annotated_object):
    """
    Validates that the object type can be annotated and that the object does
    not have conflicting annotations.
    """
    data_type = annotated_object.data_type
    name = annotated_object.name
    loc = annotated_object._ast_node.lineno, annotated_object._ast_node.path

    curr_data_type = data_type

    while isinstance(curr_data_type, Alias) or isinstance(curr_data_type, Nullable):
        # aliases have redactors associated with the type itself
        if hasattr(curr_data_type, 'redactor') and curr_data_type.redactor:
            raise InvalidSpec("A redactor has already been defined for '%s' by '%s'." %
                              (str(name), str(curr_data_type.name)), *loc)

        curr_data_type = curr_data_type.data_type

    if hasattr(annotated_object, 'redactor') and annotated_object.redactor:
        if is_map_type(curr_data_type) or is_list_type(curr_data_type):
            while True:
                if is_map_type(curr_data_type):
                    curr_data_type = curr_data_type.value_data_type
                else:
                    curr_data_type = curr_data_type.data_type

                should_continue = (is_map_type(curr_data_type) or
                                   is_list_type(curr_data_type) or
                                   is_nullable_type(curr_data_type))

                if should_continue is False:
                    break

        if is_user_defined_type(curr_data_type) or is_void_type(curr_data_type):
            raise InvalidSpec(
                "Redactors can't be applied to user-defined or void types.", *loc)
python
{ "resource": "" }
q24617
ExampleBackend.generate
train
def generate(self, api): """Generates a file that lists each namespace.""" with self.output_to_relative_path('ex1.out'): for namespace in api.namespaces.values(): self.emit(namespace.name)
python
{ "resource": "" }
q24618
get_zip_class
train
def get_zip_class():
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """
    class ContextualZipFile(zipfile.ZipFile):
        def __enter__(self):
            return self

        def __exit__(self, type, value, traceback):
            # actually close the archive; the bare attribute reference was a no-op
            self.close()

    return zipfile.ZipFile if hasattr(zipfile.ZipFile, '__exit__') else \
        ContextualZipFile
python
{ "resource": "" }
q24619
PythonTypeStubsBackend._generate_typevars
train
def _generate_typevars(self): # type: () -> None """ Creates type variables that are used by the type signatures for _process_custom_annotations. """ self.emit("T = TypeVar('T', bound=bb.AnnotationType)") self.emit("U = TypeVar('U')") self.import_tracker._register_typing_import('TypeVar') self.emit()
python
{ "resource": "" }
q24620
String.validate
train
def validate(self, val): """ A unicode string of the correct length and pattern will pass validation. In PY2, we enforce that a str type must be valid utf-8, and a unicode string will be returned. """ if not isinstance(val, six.string_types): raise ValidationError("'%s' expected to be a string, got %s" % (val, generic_type_name(val))) if not six.PY3 and isinstance(val, str): try: val = val.decode('utf-8') except UnicodeDecodeError: raise ValidationError("'%s' was not valid utf-8") if self.max_length is not None and len(val) > self.max_length: raise ValidationError("'%s' must be at most %d characters, got %d" % (val, self.max_length, len(val))) if self.min_length is not None and len(val) < self.min_length: raise ValidationError("'%s' must be at least %d characters, got %d" % (val, self.min_length, len(val))) if self.pattern and not self.pattern_re.match(val): raise ValidationError("'%s' did not match pattern '%s'" % (val, self.pattern)) return val
python
{ "resource": "" }
q24621
Struct.validate
train
def validate(self, val): """ For a val to pass validation, val must be of the correct type and have all required fields present. """ self.validate_type_only(val) self.validate_fields_only(val) return val
python
{ "resource": "" }
q24622
Struct.validate_with_permissions
train
def validate_with_permissions(self, val, caller_permissions):
    """
    For a val to pass validation, val must be of the correct type and have
    all required permissioned fields present. Should only be called for
    callers with extra permissions.
    """
    self.validate(val)
    self.validate_fields_only_with_permissions(val, caller_permissions)
    return val
python
{ "resource": "" }
q24623
Struct.validate_fields_only
train
def validate_fields_only(self, val): """ To pass field validation, no required field should be missing. This method assumes that the contents of each field have already been validated on assignment, so it's merely a presence check. FIXME(kelkabany): Since the definition object does not maintain a list of which fields are required, all fields are scanned. """ for field_name in self.definition._all_field_names_: if not hasattr(val, field_name): raise ValidationError("missing required field '%s'" % field_name)
python
{ "resource": "" }
q24624
Struct.validate_fields_only_with_permissions
train
def validate_fields_only_with_permissions(self, val, caller_permissions): """ To pass field validation, no required field should be missing. This method assumes that the contents of each field have already been validated on assignment, so it's merely a presence check. Should only be called for callers with extra permissions. """ self.validate_fields_only(val) # check if type has been patched for extra_permission in caller_permissions.permissions: all_field_names = '_all_{}_field_names_'.format(extra_permission) for field_name in getattr(self.definition, all_field_names, set()): if not hasattr(val, field_name): raise ValidationError("missing required field '%s'" % field_name)
python
{ "resource": "" }
q24625
Union.validate
train
def validate(self, val): """ For a val to pass validation, it must have a _tag set. This assumes that the object validated that _tag is a valid tag, and that any associated value has also been validated. """ self.validate_type_only(val) if not hasattr(val, '_tag') or val._tag is None: raise ValidationError('no tag set') return val
python
{ "resource": "" }
q24626
Api.ensure_namespace
train
def ensure_namespace(self, name):
    # type: (str) -> ApiNamespace
    """
    Only creates a namespace if it hasn't yet been defined.

    :param str name: Name of the namespace.
    :return ApiNamespace:
    """
    if name not in self.namespaces:
        self.namespaces[name] = ApiNamespace(name)
    return self.namespaces[name]
python
{ "resource": "" }
q24627
Api.normalize
train
def normalize(self): # type: () -> None """ Alphabetizes namespaces and routes to make spec parsing order mostly irrelevant. """ ordered_namespaces = OrderedDict() # type: NamespaceDict # self.namespaces is currently ordered by declaration order. for namespace_name in sorted(self.namespaces.keys()): ordered_namespaces[namespace_name] = self.namespaces[namespace_name] self.namespaces = ordered_namespaces for namespace in self.namespaces.values(): namespace.normalize()
python
{ "resource": "" }
q24628
ApiNamespace.add_doc
train
def add_doc(self, docstring): # type: (six.text_type) -> None """Adds a docstring for this namespace. The input docstring is normalized to have no leading whitespace and no trailing whitespace except for a newline at the end. If a docstring already exists, the new normalized docstring is appended to the end of the existing one with two newlines separating them. """ assert isinstance(docstring, six.text_type), type(docstring) normalized_docstring = doc_unwrap(docstring) + '\n' if self.doc is None: self.doc = normalized_docstring else: self.doc += normalized_docstring
python
{ "resource": "" }
q24629
ApiNamespace.add_imported_namespace
train
def add_imported_namespace(self,
                           namespace,
                           imported_alias=False,
                           imported_data_type=False,
                           imported_annotation=False,
                           imported_annotation_type=False):
    # type: (ApiNamespace, bool, bool, bool, bool) -> None
    """
    Keeps track of namespaces that this namespace imports.

    Args:
        namespace (Namespace): The imported namespace.
        imported_alias (bool): Set if this namespace references an alias
            in the imported namespace.
        imported_data_type (bool): Set if this namespace references a
            data type in the imported namespace.
        imported_annotation (bool): Set if this namespace references an
            annotation in the imported namespace.
        imported_annotation_type (bool): Set if this namespace references
            an annotation type in the imported namespace, possibly
            indirectly (by referencing an annotation elsewhere that has
            this type).
    """
    assert self.name != namespace.name, \
        'Namespace cannot import itself.'
    reason = self._imported_namespaces.setdefault(namespace, _ImportReason())
    if imported_alias:
        reason.alias = True
    if imported_data_type:
        reason.data_type = True
    if imported_annotation:
        reason.annotation = True
    if imported_annotation_type:
        reason.annotation_type = True
python
{ "resource": "" }
q24630
ApiNamespace.linearize_data_types
train
def linearize_data_types(self): # type: () -> typing.List[UserDefined] """ Returns a list of all data types used in the namespace. Because the inheritance of data types can be modeled as a DAG, the list will be a linearization of the DAG. It's ideal to generate data types in this order so that composite types that reference other composite types are defined in the correct order. """ linearized_data_types = [] seen_data_types = set() # type: typing.Set[UserDefined] def add_data_type(data_type): # type: (UserDefined) -> None if data_type in seen_data_types: return elif data_type.namespace != self: # We're only concerned with types defined in this namespace. return if is_composite_type(data_type) and data_type.parent_type: add_data_type(data_type.parent_type) linearized_data_types.append(data_type) seen_data_types.add(data_type) for data_type in self.data_types: add_data_type(data_type) return linearized_data_types
python
{ "resource": "" }
q24631
ApiNamespace.linearize_aliases
train
def linearize_aliases(self): # type: () -> typing.List[Alias] """ Returns a list of all aliases used in the namespace. The aliases are ordered to ensure that if they reference other aliases those aliases come earlier in the list. """ linearized_aliases = [] seen_aliases = set() # type: typing.Set[Alias] def add_alias(alias): # type: (Alias) -> None if alias in seen_aliases: return elif alias.namespace != self: return if is_alias(alias.data_type): add_alias(alias.data_type) linearized_aliases.append(alias) seen_aliases.add(alias) for alias in self.aliases: add_alias(alias) return linearized_aliases
python
{ "resource": "" }
q24632
ApiNamespace.get_route_io_data_types
train
def get_route_io_data_types(self): # type: () -> typing.List[UserDefined] """ Returns a list of all user-defined data types that are referenced as either an argument, result, or error of a route. If a List or Nullable data type is referenced, then the contained data type is returned assuming it's a user-defined type. """ data_types = set() # type: typing.Set[UserDefined] for route in self.routes: data_types |= self.get_route_io_data_types_for_route(route) return sorted(data_types, key=lambda dt: dt.name)
python
{ "resource": "" }
q24633
ApiNamespace.get_imported_namespaces
train
def get_imported_namespaces(self, must_have_imported_data_type=False, consider_annotations=False, consider_annotation_types=False): # type: (bool, bool, bool) -> typing.List[ApiNamespace] """ Returns a list of Namespace objects. A namespace is a member of this list if it is imported by the current namespace and a data type is referenced from it. Namespaces are in ASCII order by name. Args: must_have_imported_data_type (bool): If true, result does not include namespaces that were not imported for data types. consider_annotations (bool): If false, result does not include namespaces that were only imported for annotations consider_annotation_types (bool): If false, result does not include namespaces that were only imported for annotation types. Returns: List[Namespace]: A list of imported namespaces. """ imported_namespaces = [] for imported_namespace, reason in self._imported_namespaces.items(): if must_have_imported_data_type and not reason.data_type: continue if (not consider_annotations) and not ( reason.data_type or reason.alias or reason.annotation_type ): continue if (not consider_annotation_types) and not ( reason.data_type or reason.alias or reason.annotation ): continue imported_namespaces.append(imported_namespace) imported_namespaces.sort(key=lambda n: n.name) return imported_namespaces
python
{ "resource": "" }
q24634
ApiNamespace.get_namespaces_imported_by_route_io
train
def get_namespaces_imported_by_route_io(self): # type: () -> typing.List[ApiNamespace] """ Returns a list of Namespace objects. A namespace is a member of this list if it is imported by the current namespace and has a data type from it referenced as an argument, result, or error of a route. Namespaces are in ASCII order by name. """ namespace_data_types = sorted(self.get_route_io_data_types(), key=lambda dt: dt.name) referenced_namespaces = set() for data_type in namespace_data_types: if data_type.namespace != self: referenced_namespaces.add(data_type.namespace) return sorted(referenced_namespaces, key=lambda n: n.name)
python
{ "resource": "" }
q24635
ApiNamespace.normalize
train
def normalize(self):
    # type: () -> None
    """
    Alphabetizes routes, data types, aliases, and annotations to make
    declaration order irrelevant.
    """
    self.routes.sort(key=lambda route: route.name)
    self.data_types.sort(key=lambda data_type: data_type.name)
    self.aliases.sort(key=lambda alias: alias.name)
    self.annotations.sort(key=lambda annotation: annotation.name)
python
{ "resource": "" }
q24636
ApiRoute.set_attributes
train
def set_attributes(self, deprecated, doc, arg_data_type, result_data_type, error_data_type, attrs): """ Converts a forward reference definition of a route into a full definition. :param DeprecationInfo deprecated: Set if this route is deprecated. :param str doc: Description of the endpoint. :type arg_data_type: :class:`stone.data_type.DataType` :type result_data_type: :class:`stone.data_type.DataType` :type error_data_type: :class:`stone.data_type.DataType` :param dict attrs: Map of string keys to values that are either int, float, bool, str, or None. These are the route attributes assigned in the spec. """ self.deprecated = deprecated self.raw_doc = doc self.doc = doc_unwrap(doc) self.arg_data_type = arg_data_type self.result_data_type = result_data_type self.error_data_type = error_data_type self.attrs = attrs
python
{ "resource": "" }
q24637
ApiRoute.name_with_version
train
def name_with_version(self):
    """
    Get user-friendly representation of the route.

    :return: Route name with version suffix. The version suffix is omitted
        for version 1.
    """
    if self.version == 1:
        return self.name
    else:
        return '{}:{}'.format(self.name, self.version)
python
{ "resource": "" }
q24638
UnstoneBackend.generate
train
def generate(self, api): """Main code generator entry point.""" # Create a file for each namespace. for namespace in api.namespaces.values(): with self.output_to_relative_path('%s.stone' % namespace.name): # Output a namespace header. self.emit('namespace %s' % namespace.name) # Output all data type (struct and union) definitions. for data_type in namespace.linearize_data_types(): self.generate_data_type(data_type) # Output all route definitions. for route in namespace.routes: self.generate_route(route)
python
{ "resource": "" }
q24639
UnstoneBackend.generate_route
train
def generate_route(self, route): """Output a route definition.""" self.emit('') self.emit('route %s (%s, %s, %s)' % ( route.name, self.format_data_type(route.arg_data_type), self.format_data_type(route.result_data_type), self.format_data_type(route.error_data_type) )) # Output the docstring. with self.indent(): if route.doc is not None: self.emit(self.format_string(route.doc))
python
{ "resource": "" }
q24640
UnstoneBackend.format_data_type
train
def format_data_type(self, data_type): """Helper function to format a data type. This returns the name if it's a struct or union, otherwise (i.e. for primitive types) it renders the name and the parameters. """ s = data_type.name for type_class, key_list in self._data_type_map: if isinstance(data_type, type_class): args = [] for key in key_list: val = getattr(data_type, key) if val is not None: if isinstance(val, AstTypeRef): sval = val.name elif isinstance(val, DataType): sval = self.format_data_type(val) else: sval = self.format_value(val) args.append(key + '=' + sval) if args: s += '(' + ', '.join(args) + ')' break if data_type.nullable: s += '?' return s
python
{ "resource": "" }
q24641
UnstoneBackend.format_value
train
def format_value(self, val):
    """Helper function to format a value."""
    if isinstance(val, six.text_type):
        return self.format_string(val)
    else:
        return six.text_type(val)
python
{ "resource": "" }
q24642
_create_token
train
def _create_token(token_type, value, lineno, lexpos):
    """
    Helper for creating ply.lex.LexToken objects. Unfortunately, LexToken
    does not have a constructor defined to make setting these values easy.
    """
    token = lex.LexToken()
    token.type = token_type
    token.value = value
    token.lineno = lineno
    token.lexpos = lexpos
    return token
python
{ "resource": "" }
q24643
Lexer.token
train
def token(self): """ Returns the next LexToken. Returns None when all tokens have been exhausted. """ if self.tokens_queue: self.last_token = self.tokens_queue.pop(0) else: r = self.lex.token() if isinstance(r, MultiToken): self.tokens_queue.extend(r.tokens) self.last_token = self.tokens_queue.pop(0) else: if r is None and self.cur_indent > 0: if (self.last_token and self.last_token.type not in ('NEWLINE', 'LINE')): newline_token = _create_token( 'NEWLINE', '\n', self.lex.lineno, self.lex.lexpos) self.tokens_queue.append(newline_token) dedent_count = self.cur_indent dedent_token = _create_token( 'DEDENT', '\t', self.lex.lineno, self.lex.lexpos) self.tokens_queue.extend([dedent_token] * dedent_count) self.cur_indent = 0 self.last_token = self.tokens_queue.pop(0) else: self.last_token = r return self.last_token
python
{ "resource": "" }
q24644
Lexer._create_tokens_for_next_line_dent
train
def _create_tokens_for_next_line_dent(self, newline_token): """ Starting from a newline token that isn't followed by another newline token, returns any indent or dedent tokens that immediately follow. If indentation doesn't change, returns None. """ indent_delta = self._get_next_line_indent_delta(newline_token) if indent_delta is None or indent_delta == 0: # Next line's indent isn't relevant OR there was no change in # indentation. return None dent_type = 'INDENT' if indent_delta > 0 else 'DEDENT' dent_token = _create_token( dent_type, '\t', newline_token.lineno + 1, newline_token.lexpos + len(newline_token.value)) tokens = [dent_token] * abs(indent_delta) self.cur_indent += indent_delta return MultiToken(tokens)
python
{ "resource": "" }
q24645
Lexer._check_for_indent
train
def _check_for_indent(self, newline_token): """ Checks that the line following a newline is indented, otherwise a parsing error is generated. """ indent_delta = self._get_next_line_indent_delta(newline_token) if indent_delta is None or indent_delta == 1: # Next line's indent isn't relevant (e.g. it's a comment) OR # next line is correctly indented. return None else: self.errors.append( ('Line continuation must increment indent by 1.', newline_token.lexer.lineno))
python
{ "resource": "" }
q24646
ObjCBaseBackend._get_imports_m
train
def _get_imports_m(self, data_types, default_imports): """Emits all necessary implementation file imports for the given Stone data type.""" if not isinstance(data_types, list): data_types = [data_types] import_classes = default_imports for data_type in data_types: import_classes.append(fmt_class_prefix(data_type)) if data_type.parent_type: import_classes.append(fmt_class_prefix(data_type.parent_type)) if is_struct_type( data_type) and data_type.has_enumerated_subtypes(): for _, subtype in data_type.get_all_subtypes_with_tags(): import_classes.append(fmt_class_prefix(subtype)) for field in data_type.all_fields: data_type, _ = unwrap_nullable(field.data_type) # unpack list or map while is_list_type(data_type) or is_map_type(data_type): data_type = (data_type.value_data_type if is_map_type(data_type) else data_type.data_type) if is_user_defined_type(data_type): import_classes.append(fmt_class_prefix(data_type)) if import_classes: import_classes = list(set(import_classes)) import_classes.sort() return import_classes
python
{ "resource": "" }
q24647
ObjCBaseBackend._get_imports_h
train
def _get_imports_h(self, data_types): """Emits all necessary header file imports for the given Stone data type.""" if not isinstance(data_types, list): data_types = [data_types] import_classes = [] for data_type in data_types: if is_user_defined_type(data_type): import_classes.append(fmt_class_prefix(data_type)) for field in data_type.all_fields: data_type, _ = unwrap_nullable(field.data_type) # unpack list or map while is_list_type(data_type) or is_map_type(data_type): data_type = (data_type.value_data_type if is_map_type(data_type) else data_type.data_type) if is_user_defined_type(data_type): import_classes.append(fmt_class_prefix(data_type)) import_classes = list(set(import_classes)) import_classes.sort() return import_classes
python
{ "resource": "" }
q24648
ObjCBaseBackend._struct_has_defaults
train
def _struct_has_defaults(self, struct):
    """Returns whether the given struct has any default values."""
    return [
        f for f in struct.all_fields
        if f.has_default or is_nullable_type(f.data_type)
    ]
python
{ "resource": "" }
q24649
Compiler.build
train
def build(self): """Creates outputs. Outputs are files made by a backend.""" if os.path.exists(self.build_path) and not os.path.isdir(self.build_path): self._logger.error('Output path must be a folder if it already exists') return Compiler._mkdir(self.build_path) self._execute_backend_on_spec()
python
{ "resource": "" }
q24650
Compiler._execute_backend_on_spec
train
def _execute_backend_on_spec(self): """Renders a source file into its final form.""" api_no_aliases_cache = None for attr_key in dir(self.backend_module): attr_value = getattr(self.backend_module, attr_key) if (inspect.isclass(attr_value) and issubclass(attr_value, Backend) and not inspect.isabstract(attr_value)): self._logger.info('Running backend: %s', attr_value.__name__) backend = attr_value(self.build_path, self.backend_args) if backend.preserve_aliases: api = self.api else: if not api_no_aliases_cache: api_no_aliases_cache = remove_aliases_from_api(self.api) api = api_no_aliases_cache try: backend.generate(api) except Exception: # Wrap this exception so that it isn't thought of as a bug # in the stone parser, but rather a bug in the backend. # Remove the last char of the traceback b/c it's a newline. raise BackendException( attr_value.__name__, traceback.format_exc()[:-1])
python
{ "resource": "" }
q24651
EndpointException.to_dict
train
def to_dict(self): """Return a dictionary representation of the exception.""" as_dict = dict(self.payload or ()) as_dict['message'] = self.message return as_dict
python
{ "resource": "" }
q24652
Model.required
train
def required(cls): """Return a list of all columns required by the database to create the resource. :param cls: The Model class to gather attributes from :rtype: list """ columns = [] for column in cls.__table__.columns: # pylint: disable=no-member is_autoincrement = 'int' in str(column.type).lower() and column.autoincrement if (not column.nullable and not column.primary_key) or (column.primary_key and not is_autoincrement): columns.append(column.name) return columns
python
{ "resource": "" }
q24653
Model.optional
train
def optional(cls):
    """Return a list of all nullable columns for the resource's table.

    :rtype: list
    """
    columns = []
    for column in cls.__table__.columns:  # pylint: disable=no-member
        if column.nullable:
            columns.append(column.name)
    return columns
python
{ "resource": "" }
q24654
Model.to_dict
train
def to_dict(self): """Return the resource as a dictionary. :rtype: dict """ result_dict = {} for column in self.__table__.columns.keys(): # pylint: disable=no-member value = result_dict[column] = getattr(self, column, None) if isinstance(value, Decimal): result_dict[column] = float(result_dict[column]) elif isinstance(value, datetime.datetime): result_dict[column] = value.isoformat() return result_dict
python
{ "resource": "" }
q24655
Model.description
train
def description(cls): """Return a field->data type dictionary describing this model as reported by the database. :rtype: dict """ description = {} for column in cls.__table__.columns: # pylint: disable=no-member column_description = str(column.type) if not column.nullable: column_description += ' (required)' description[column.name] = column_description return description
python
{ "resource": "" }
q24656
register_service
train
def register_service(cls, primary_key_type): """Register an API service endpoint. :param cls: The class to register :param str primary_key_type: The type (as a string) of the primary_key field """ view_func = cls.as_view(cls.__name__.lower()) # pylint: disable=no-member methods = set(cls.__model__.__methods__) # pylint: disable=no-member if 'GET' in methods: # pylint: disable=no-member current_app.add_url_rule( cls.__model__.__url__ + '/', defaults={'resource_id': None}, view_func=view_func, methods=['GET']) current_app.add_url_rule( '{resource}/meta'.format(resource=cls.__model__.__url__), view_func=view_func, methods=['GET']) if 'POST' in methods: # pylint: disable=no-member current_app.add_url_rule( cls.__model__.__url__ + '/', view_func=view_func, methods=['POST', ]) current_app.add_url_rule( '{resource}/<{pk_type}:{pk}>'.format( resource=cls.__model__.__url__, pk='resource_id', pk_type=primary_key_type), view_func=view_func, methods=methods - {'POST'}) current_app.classes.append(cls)
python
{ "resource": "" }
q24657
_reflect_all
train
def _reflect_all(exclude_tables=None, admin=None, read_only=False, schema=None): """Register all tables in the given database as services. :param list exclude_tables: A list of tables to exclude from the API service """ AutomapModel.prepare( # pylint:disable=maybe-no-member db.engine, reflect=True, schema=schema) for cls in AutomapModel.classes: if exclude_tables and cls.__table__.name in exclude_tables: continue if read_only: cls.__methods__ = {'GET'} register_model(cls, admin)
python
{ "resource": "" }
q24658
_register_user_models
train
def _register_user_models(user_models, admin=None, schema=None): """Register any user-defined models with the API Service. :param list user_models: A list of user-defined models to include in the API service """ if any([issubclass(cls, AutomapModel) for cls in user_models]): AutomapModel.prepare( # pylint:disable=maybe-no-member db.engine, reflect=True, schema=schema) for user_model in user_models: register_model(user_model, admin)
python
{ "resource": "" }
q24659
is_valid_method
train
def is_valid_method(model, resource=None):
    """Return the error message to be sent to the client if the current
    request fails any user-defined validation."""
    validation_function_name = 'is_valid_{}'.format(
        request.method.lower())
    if hasattr(model, validation_function_name):
        return getattr(model, validation_function_name)(request, resource)
python
{ "resource": "" }
q24660
Service.delete
train
def delete(self, resource_id): """Return an HTTP response object resulting from a HTTP DELETE call. :param resource_id: The value of the resource's primary key """ resource = self._resource(resource_id) error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) db.session().delete(resource) db.session().commit() return self._no_content_response()
python
{ "resource": "" }
q24661
Service.get
train
def get(self, resource_id=None): """Return an HTTP response object resulting from an HTTP GET call. If *resource_id* is provided, return just the single resource. Otherwise, return the full collection. :param resource_id: The value of the resource's primary key """ if request.path.endswith('meta'): return self._meta() if resource_id is None: error_message = is_valid_method(self.__model__) if error_message: raise BadRequestException(error_message) if 'export' in request.args: return self._export(self._all_resources()) return flask.jsonify({ self.__json_collection_name__: self._all_resources() }) else: resource = self._resource(resource_id) error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) return jsonify(resource)
python
{ "resource": "" }
q24662
Service.patch
train
def patch(self, resource_id): """Return an HTTP response object resulting from an HTTP PATCH call. :returns: ``HTTP 200`` if the resource already exists :returns: ``HTTP 400`` if the request is malformed :returns: ``HTTP 404`` if the resource is not found :param resource_id: The value of the resource's primary key """ resource = self._resource(resource_id) error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) if not request.json: raise BadRequestException('No JSON data received') resource.update(request.json) db.session().merge(resource) db.session().commit() return jsonify(resource)
python
{ "resource": "" }
q24663
Service.post
train
def post(self): """Return the JSON representation of a new resource created through an HTTP POST call. :returns: ``HTTP 201`` if a resource is properly created :returns: ``HTTP 204`` if the resource already exists :returns: ``HTTP 400`` if the request is malformed or missing data """ resource = self.__model__.query.filter_by(**request.json).first() if resource: error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) return self._no_content_response() resource = self.__model__(**request.json) # pylint: disable=not-callable error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) db.session().add(resource) db.session().commit() return self._created_response(resource)
python
{ "resource": "" }
q24664
Service.put
train
def put(self, resource_id): """Return the JSON representation of a new resource created or updated through an HTTP PUT call. If resource_id is not provided, it is assumed the primary key field is included and a totally new resource is created. Otherwise, the existing resource referred to by *resource_id* is updated with the provided JSON data. This method is idempotent. :returns: ``HTTP 201`` if a new resource is created :returns: ``HTTP 200`` if a resource is updated :returns: ``HTTP 400`` if the request is malformed or missing data """ resource = self.__model__.query.get(resource_id) if resource: error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) resource.update(request.json) db.session().merge(resource) db.session().commit() return jsonify(resource) resource = self.__model__(**request.json) # pylint: disable=not-callable error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) db.session().add(resource) db.session().commit() return self._created_response(resource)
python
{ "resource": "" }
q24665
Service._all_resources
train
def _all_resources(self): """Return the complete collection of resources as a list of dictionaries. :rtype: :class:`sandman2.model.Model` """ queryset = self.__model__.query args = {k: v for (k, v) in request.args.items() if k not in ('page', 'export')} limit = None if args: filters = [] order = [] for key, value in args.items(): if value.startswith('%'): filters.append(getattr(self.__model__, key).like(str(value), escape='/')) elif key == 'sort': direction = desc if value.startswith('-') else asc order.append(direction(getattr(self.__model__, value.lstrip('-')))) elif key == 'limit': limit = int(value) elif hasattr(self.__model__, key): filters.append(getattr(self.__model__, key) == value) else: raise BadRequestException('Invalid field [{}]'.format(key)) queryset = queryset.filter(*filters).order_by(*order) if 'page' in request.args: resources = queryset.paginate(page=int(request.args['page']), per_page=limit).items else: queryset = queryset.limit(limit) resources = queryset.all() return [r.to_dict() for r in resources]
python
{ "resource": "" }
q24666
main
train
def main(): """Main entry point for script.""" parser = argparse.ArgumentParser( description='Auto-generate a RESTful API service ' 'from an existing database.' ) parser.add_argument( 'URI', help='Database URI in the format ' 'postgresql+psycopg2://user:password@host/database') parser.add_argument( '-d', '--debug', help='Turn on debug logging', action='store_true', default=False) parser.add_argument( '-p', '--port', help='Port for service to listen on', default=5000) parser.add_argument( '-l', '--local-only', help='Only provide service on localhost (will not be accessible' ' from other machines)', action='store_true', default=False) parser.add_argument( '-r', '--read-only', help='Make all database resources read-only (i.e. only the HTTP GET method is supported)', action='store_true', default=False) parser.add_argument( '-s', '--schema', help='Use this named schema instead of default', default=None) args = parser.parse_args() app = get_app(args.URI, read_only=args.read_only, schema=args.schema) if args.debug: app.config['DEBUG'] = True if args.local_only: host = '127.0.0.1' else: host = '0.0.0.0' app.config['SECRET_KEY'] = '42' app.run(host=host, port=int(args.port))
python
{ "resource": "" }
q24667
etag
train
def etag(func): """Return a decorator that generates proper ETag values for a response. :param func: view function """ @functools.wraps(func) def wrapped(*args, **kwargs): """Call the view function and generate an ETag value, checking the headers to determine what response to send.""" # only for HEAD and GET requests assert request.method in ['HEAD', 'GET'],\ '@etag is only supported for GET requests' response = func(*args, **kwargs) response = make_response(response) etag_value = '"' + hashlib.md5(response.get_data()).hexdigest() + '"' response.headers['ETag'] = etag_value if_match = request.headers.get('If-Match') if_none_match = request.headers.get('If-None-Match') if if_match: etag_list = [tag.strip() for tag in if_match.split(',')] if etag_value not in etag_list and '*' not in etag_list: response = precondition_failed() elif if_none_match: etag_list = [tag.strip() for tag in if_none_match.split(',')] if etag_value in etag_list or '*' in etag_list: response = not_modified() return response return wrapped
python
{ "resource": "" }
q24668
validate_fields
train
def validate_fields(func): """A decorator to automatically detect missing required fields from json data.""" @functools.wraps(func) def decorated(instance, *args, **kwargs): """The decorator function.""" data = request.get_json(force=True, silent=True) if not data: raise BadRequestException('No data received from request') for key in data: if key not in ( instance.__model__.required() + instance.__model__.optional()): raise BadRequestException('Unknown field [{}]'.format(key)) missing = set(instance.__model__.required()) - set(data) if missing: message = 'The following required fields are missing: ' + ', '.join(missing) raise BadRequestException(message) return func(instance, *args, **kwargs) return decorated
python
{ "resource": "" }
q24669
merge_record_extra
train
def merge_record_extra(record, target, reserved): """ Merges extra attributes from LogRecord object into target dictionary :param record: logging.LogRecord :param target: dict to update :param reserved: dict or list with reserved keys to skip """ for key, value in record.__dict__.items(): # this allows to have numeric keys if (key not in reserved and not (hasattr(key, "startswith") and key.startswith('_'))): target[key] = value return target
python
{ "resource": "" }
q24670
JsonFormatter._str_to_fn
train
def _str_to_fn(self, fn_as_str): """ If the argument is not a string, return whatever was passed in. Parses a string such as package.module.function, imports the module and returns the function. :param fn_as_str: The string to parse. If not a string, return it. """ if not isinstance(fn_as_str, str): return fn_as_str path, _, function = fn_as_str.rpartition('.') module = importlib.import_module(path) return getattr(module, function)
python
{ "resource": "" }
q24671
JsonFormatter.parse
train
def parse(self): """ Parses format string looking for substitutions This method is responsible for returning a list of fields (as strings) to include in all log messages. """ standard_formatters = re.compile(r'\((.+?)\)', re.IGNORECASE) return standard_formatters.findall(self._fmt)
python
{ "resource": "" }
q24672
JsonFormatter.add_fields
train
def add_fields(self, log_record, record, message_dict): """ Override this method to implement custom logic for adding fields. """ for field in self._required_fields: log_record[field] = record.__dict__.get(field) log_record.update(message_dict) merge_record_extra(record, log_record, reserved=self._skip_fields) if self.timestamp: key = self.timestamp if type(self.timestamp) == str else 'timestamp' log_record[key] = datetime.utcnow()
python
{ "resource": "" }
q24673
JsonFormatter.jsonify_log_record
train
def jsonify_log_record(self, log_record):
    """Returns a json string of the log record."""
    return self.json_serializer(log_record,
                                default=self.json_default,
                                cls=self.json_encoder,
                                indent=self.json_indent,
                                ensure_ascii=self.json_ensure_ascii)
python
{ "resource": "" }
q24674
JsonFormatter.format
train
def format(self, record):
    """Formats a log record and serializes to json"""
    message_dict = {}
    if isinstance(record.msg, dict):
        message_dict = record.msg
        record.message = None
    else:
        record.message = record.getMessage()
    # only format time if needed
    if "asctime" in self._required_fields:
        record.asctime = self.formatTime(record, self.datefmt)

    # Display formatted exception, but allow overriding it in the
    # user-supplied dict.
    if record.exc_info and not message_dict.get('exc_info'):
        message_dict['exc_info'] = self.formatException(record.exc_info)
    if not message_dict.get('exc_info') and record.exc_text:
        message_dict['exc_info'] = record.exc_text
    # Display formatted record of stack frames
    # default format is a string returned from :func:`traceback.print_stack`
    try:
        if record.stack_info and not message_dict.get('stack_info'):
            message_dict['stack_info'] = self.formatStack(record.stack_info)
    except AttributeError:
        # Python2.7 doesn't have stack_info.
        pass

    try:
        log_record = OrderedDict()
    except NameError:
        log_record = {}

    self.add_fields(log_record, record, message_dict)
    log_record = self.process_log_record(log_record)

    return "%s%s" % (self.prefix, self.jsonify_log_record(log_record))
python
{ "resource": "" }
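Usage note (illustrative only): a typical way to wire this formatter into the standard logging module. The field list and logger name are just examples; extra keys are merged into the JSON document by add_fields/merge_record_extra.

import logging
from pythonjsonlogger import jsonlogger

handler = logging.StreamHandler()
handler.setFormatter(jsonlogger.JsonFormatter('%(asctime)s %(levelname)s %(message)s'))

logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# Each record is emitted as a single JSON object on one line.
logger.info('payment processed', extra={'order_id': 42})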
q24675
odometry._get_file_lists
train
def _get_file_lists(self):
    """Find and list data files for each sensor."""
    self.cam0_files = sorted(glob.glob(
        os.path.join(self.sequence_path, 'image_0',
                     '*.{}'.format(self.imtype))))
    self.cam1_files = sorted(glob.glob(
        os.path.join(self.sequence_path, 'image_1',
                     '*.{}'.format(self.imtype))))
    self.cam2_files = sorted(glob.glob(
        os.path.join(self.sequence_path, 'image_2',
                     '*.{}'.format(self.imtype))))
    self.cam3_files = sorted(glob.glob(
        os.path.join(self.sequence_path, 'image_3',
                     '*.{}'.format(self.imtype))))
    self.velo_files = sorted(glob.glob(
        os.path.join(self.sequence_path, 'velodyne',
                     '*.bin')))

    # Subselect the chosen range of frames, if any
    if self.frames is not None:
        self.cam0_files = utils.subselect_files(
            self.cam0_files, self.frames)
        self.cam1_files = utils.subselect_files(
            self.cam1_files, self.frames)
        self.cam2_files = utils.subselect_files(
            self.cam2_files, self.frames)
        self.cam3_files = utils.subselect_files(
            self.cam3_files, self.frames)
        self.velo_files = utils.subselect_files(
            self.velo_files, self.frames)
python
{ "resource": "" }
q24676
rotx
train
def rotx(t):
    """Rotation about the x-axis."""
    c = np.cos(t)
    s = np.sin(t)
    return np.array([[1,  0,  0],
                     [0,  c, -s],
                     [0,  s,  c]])
python
{ "resource": "" }
q24677
roty
train
def roty(t):
    """Rotation about the y-axis."""
    c = np.cos(t)
    s = np.sin(t)
    return np.array([[c,  0,  s],
                     [0,  1,  0],
                     [-s, 0,  c]])
python
{ "resource": "" }
q24678
rotz
train
def rotz(t):
    """Rotation about the z-axis."""
    c = np.cos(t)
    s = np.sin(t)
    return np.array([[c, -s,  0],
                     [s,  c,  0],
                     [0,  0,  1]])
python
{ "resource": "" }
q24679
transform_from_rot_trans
train
def transform_from_rot_trans(R, t):
    """Transformation matrix from rotation matrix and translation vector."""
    R = R.reshape(3, 3)
    t = t.reshape(3, 1)
    return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
python
{ "resource": "" }
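Usage note (illustrative only): composing the three axis rotations above with transform_from_rot_trans gives a 4x4 homogeneous pose. The angles and translation below are arbitrary values chosen for the sketch.

import numpy as np

roll, pitch, yaw = 0.1, -0.05, 1.2           # arbitrary Euler angles [rad]
R = rotz(yaw).dot(roty(pitch)).dot(rotx(roll))
t = np.array([2.0, 0.5, -1.0])               # arbitrary translation [m]

T = transform_from_rot_trans(R, t)           # 4x4 homogeneous transform
point = np.array([1.0, 0.0, 0.0, 1.0])       # homogeneous point
print(T.dot(point)[:3])                      # the point mapped through the pose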
q24680
read_calib_file
train
def read_calib_file(filepath):
    """Read in a calibration file and parse into a dictionary."""
    data = {}

    with open(filepath, 'r') as f:
        for line in f.readlines():
            key, value = line.split(':', 1)
            # The only non-float values in these files are dates, which
            # we don't care about anyway
            try:
                data[key] = np.array([float(x) for x in value.split()])
            except ValueError:
                pass

    return data
python
{ "resource": "" }
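Usage note (illustrative only): the parsed values are flat arrays, so projection matrices need an explicit reshape. 'calib.txt' is a placeholder path, and the 'P0' key assumes a KITTI odometry-style calibration file (keys typically P0..P3 and Tr).

import numpy as np

data = read_calib_file('calib.txt')   # placeholder path to a KITTI-style calib file
P0 = np.reshape(data['P0'], (3, 4))   # left grayscale camera projection matrix
print(P0)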
q24681
load_oxts_packets_and_poses
train
def load_oxts_packets_and_poses(oxts_files):
    """Read OXTS ground truth data.

       Poses are given in an East-North-Up coordinate system
       whose origin is the first GPS position.
    """
    # Scale for Mercator projection (from first lat value)
    scale = None
    # Origin of the global coordinate system (first GPS position)
    origin = None

    oxts = []

    for filename in oxts_files:
        with open(filename, 'r') as f:
            for line in f.readlines():
                line = line.split()
                # Last five entries are flags and counts
                line[:-5] = [float(x) for x in line[:-5]]
                line[-5:] = [int(float(x)) for x in line[-5:]]

                packet = OxtsPacket(*line)

                if scale is None:
                    scale = np.cos(packet.lat * np.pi / 180.)

                R, t = pose_from_oxts_packet(packet, scale)

                if origin is None:
                    origin = t

                T_w_imu = transform_from_rot_trans(R, t - origin)

                oxts.append(OxtsData(packet, T_w_imu))

    return oxts
python
{ "resource": "" }
q24682
load_velo_scan
train
def load_velo_scan(file):
    """Load and parse a velodyne binary file."""
    scan = np.fromfile(file, dtype=np.float32)
    return scan.reshape((-1, 4))
python
{ "resource": "" }
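Usage note (illustrative only): each row of the returned array is (x, y, z, reflectance). The filename below is a placeholder for a real Velodyne scan file.

scan = load_velo_scan('0000000000.bin')   # placeholder path
xyz = scan[:, :3]                         # point coordinates
reflectance = scan[:, 3]                  # per-point intensity
print(scan.shape, xyz.shape, reflectance.shape)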
q24683
raw._load_calib_rigid
train
def _load_calib_rigid(self, filename):
    """Read a rigid transform calibration file as a numpy.array."""
    filepath = os.path.join(self.calib_path, filename)
    data = utils.read_calib_file(filepath)
    return utils.transform_from_rot_trans(data['R'], data['T'])
python
{ "resource": "" }
q24684
SecretsCollection.load_baseline_from_string
train
def load_baseline_from_string(cls, string):
    """Initializes a SecretsCollection object from string.

    :type string: str
    :param string: string to load SecretsCollection from.

    :rtype: SecretsCollection
    :raises: IOError
    """
    try:
        return cls.load_baseline_from_dict(json.loads(string))
    except (IOError, ValueError):
        log.error('Incorrectly formatted baseline!')
        raise
python
{ "resource": "" }
q24685
SecretsCollection.load_baseline_from_dict
train
def load_baseline_from_dict(cls, data):
    """Initializes a SecretsCollection object from dictionary.

    :type data: dict
    :param data: properly formatted dictionary to load SecretsCollection from.

    :rtype: SecretsCollection
    :raises: IOError
    """
    result = SecretsCollection()

    if not all(key in data for key in (
        'plugins_used',
        'results',
    )):
        raise IOError

    # In v0.12.0 `exclude_regex` got replaced by `exclude`
    if not any(key in data for key in (
        'exclude',
        'exclude_regex',
    )):
        raise IOError

    if 'exclude_regex' in data:
        result.exclude_files = data['exclude_regex']
    else:
        result.exclude_files = data['exclude']['files']
        result.exclude_lines = data['exclude']['lines']

    plugins = []
    for plugin in data['plugins_used']:
        plugin_classname = plugin.pop('name')
        plugins.append(initialize.from_plugin_classname(
            plugin_classname,
            exclude_lines_regex=result.exclude_lines,
            **plugin
        ))
    result.plugins = tuple(plugins)

    for filename in data['results']:
        result.data[filename] = {}

        for item in data['results'][filename]:
            secret = PotentialSecret(
                item['type'],
                filename,
                secret='will be replaced',
                lineno=item['line_number'],
                is_secret=item.get('is_secret'),
            )
            secret.secret_hash = item['hashed_secret']
            result.data[filename][secret] = secret

    result.version = (
        data['version']
        if 'version' in data
        else '0.0.0'
    )

    return result
python
{ "resource": "" }
q24686
SecretsCollection.scan_diff
train
def scan_diff(
    self,
    diff,
    baseline_filename='',
    last_commit_hash='',
    repo_name='',
):
    """For optimization purposes, our scanning strategy focuses on looking
    at incremental differences, rather than re-scanning the codebase every
    time. This function supports this, and adds information to self.data.

    :type diff: str
    :param diff: diff string.
                 e.g. The output of `git diff <fileA> <fileB>`

    :type baseline_filename: str
    :param baseline_filename: if there are any baseline secrets, then the
        baseline file will have hashes in them. By specifying it, we can
        skip this clear exception.

    :type last_commit_hash: str
    :param last_commit_hash: used for logging only -- the last commit hash
        we saved

    :type repo_name: str
    :param repo_name: used for logging only -- the name of the repo
    """
    # Local imports, so that we don't need to require unidiff for versions of
    # detect-secrets that don't use it.
    from unidiff import PatchSet
    from unidiff.errors import UnidiffParseError

    try:
        patch_set = PatchSet.from_string(diff)
    except UnidiffParseError:  # pragma: no cover
        alert = {
            'alert': 'UnidiffParseError',
            'hash': last_commit_hash,
            'repo_name': repo_name,
        }
        log.error(alert)
        raise

    if self.exclude_files:
        regex = re.compile(self.exclude_files, re.IGNORECASE)

    for patch_file in patch_set:
        filename = patch_file.path
        # If the file matches the exclude_files, we skip it
        if self.exclude_files and regex.search(filename):
            continue

        if filename == baseline_filename:
            continue

        for results, plugin in self._results_accumulator(filename):
            results.update(
                self._extract_secrets_from_patch(
                    patch_file,
                    plugin,
                    filename,
                ),
            )
python
{ "resource": "" }
q24687
SecretsCollection.scan_file
train
def scan_file(self, filename, filename_key=None):
    """Scans a specified file, and adds information to self.data

    :type filename: str
    :param filename: full path to file to scan.

    :type filename_key: str
    :param filename_key: key to store in self.data

    :returns: boolean; though this value is only used for testing
    """
    if not filename_key:
        filename_key = filename

    if os.path.islink(filename):
        return False

    try:
        with codecs.open(filename, encoding='utf-8') as f:
            self._extract_secrets_from_file(f, filename_key)

        return True
    except IOError:
        log.warning("Unable to open file: %s", filename)
        return False
python
{ "resource": "" }
q24688
SecretsCollection.get_secret
train
def get_secret(self, filename, secret, type_=None):
    """Checks to see whether a secret is found in the collection.

    :type filename: str
    :param filename: the file to search in.

    :type secret: str
    :param secret: secret hash of secret to search for.

    :type type_: str
    :param type_: type of secret, if known.

    :rtype: PotentialSecret|None
    """
    if filename not in self.data:
        return None

    if type_:
        # Optimized lookup, because we know the type of secret
        # (and therefore, its hash)
        tmp_secret = PotentialSecret(type_, filename, secret='will be overriden')
        tmp_secret.secret_hash = secret

        if tmp_secret in self.data[filename]:
            return self.data[filename][tmp_secret]

        return None

    # NOTE: We can only optimize this, if we knew the type of secret.
    # Otherwise, we need to iterate through the set and find out.
    for obj in self.data[filename]:
        if obj.secret_hash == secret:
            return obj

    return None
python
{ "resource": "" }
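Usage note (illustrative only): the loaders and get_secret combine into a simple lookup against an existing baseline. The baseline path, filename, and hash below are placeholders; a real hash would come from the `hashed_secret` field of a baseline produced by `detect-secrets scan`.

with open('.secrets.baseline') as f:                       # placeholder path
    collection = SecretsCollection.load_baseline_from_string(f.read())

found = collection.get_secret(
    'config/settings.py',                                  # hypothetical filename from the baseline
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33',            # illustrative hashed_secret value
    type_='Hex High Entropy String',
)
print(found is not None)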
q24689
SecretsCollection._extract_secrets_from_file
train
def _extract_secrets_from_file(self, f, filename):
    """Extract secrets from a given file object.

    :type f: File object
    :type filename: string
    """
    try:
        log.info("Checking file: %s", filename)

        for results, plugin in self._results_accumulator(filename):
            results.update(plugin.analyze(f, filename))
            f.seek(0)

    except UnicodeDecodeError:
        log.warning("%s failed to load.", filename)
python
{ "resource": "" }
q24690
SecretsCollection._extract_secrets_from_patch
train
def _extract_secrets_from_patch(self, f, plugin, filename):
    """Extract secrets from a given patch file object.

    Note that we only want to capture incoming secrets (so added lines).

    :type f: unidiff.patch.PatchedFile
    :type plugin: detect_secrets.plugins.base.BasePlugin
    :type filename: str
    """
    output = {}
    for chunk in f:
        # target_lines refers to incoming (new) changes
        for line in chunk.target_lines():
            if line.is_added:
                output.update(
                    plugin.analyze_string(
                        line.value,
                        line.target_line_no,
                        filename,
                    ),
                )

    return output
python
{ "resource": "" }
q24691
YamlFileParser.get_ignored_lines
train
def get_ignored_lines(self):
    """
    Return a set of integers that refer to line numbers that were
    whitelisted by the user and should be ignored.

    We need to parse the file separately from PyYAML parsing because
    the parser drops the comments (at least up to version 3.13):
    https://github.com/yaml/pyyaml/blob/a2d481b8dbd2b352cb001f07091ccf669227290f/lib3/yaml/scanner.py#L749

    :return: set
    """
    ignored_lines = set()

    for line_number, line in enumerate(self.content.split('\n'), 1):
        if (
            WHITELIST_REGEX['yaml'].search(line)
            or (
                self.exclude_lines_regex and
                self.exclude_lines_regex.search(line)
            )
        ):
            ignored_lines.add(line_number)

    return ignored_lines
python
{ "resource": "" }
q24692
IniFileParser._get_value_and_line_offset
train
def _get_value_and_line_offset(self, key, values):
    """Returns the index of the location of key, value pair in lines.

    :type key: str
    :param key: key, in config file.

    :type values: str
    :param values: values for key, in config file. This is plural,
        because you can have multiple values per key.
        e.g.
            >>> key =
            ...     value1
            ...     value2

    :type lines: list
    :param lines: a collection of lines-so-far in file

    :rtype: list(tuple)
    """
    values_list = self._construct_values_list(values)
    if not values_list:
        return []

    current_value_list_index = 0
    output = []
    lines_modified = False

    for index, line in enumerate(self.lines):
        # Check ignored lines before checking values, because
        # you can write comments *after* the value.
        if not line.strip() or self._comment_regex.match(line):
            continue

        if (
            self.exclude_lines_regex and
            self.exclude_lines_regex.search(line)
        ):
            continue

        if current_value_list_index == 0:
            first_line_regex = re.compile(r'^\s*{}[ :=]+{}'.format(
                re.escape(key),
                re.escape(values_list[current_value_list_index]),
            ))
            if first_line_regex.match(line):
                output.append((
                    values_list[current_value_list_index],
                    self.line_offset + index + 1,
                ))
                current_value_list_index += 1

            continue

        if current_value_list_index == len(values_list):
            if index == 0:
                index = 1  # don't want to count the same line again
            self.line_offset += index
            self.lines = self.lines[index:]
            lines_modified = True

            break
        else:
            output.append((
                values_list[current_value_list_index],
                self.line_offset + index + 1,
            ))
            current_value_list_index += 1

    if not lines_modified:
        # No more lines left, if loop was not explicitly left.
        self.lines = []

    return output
python
{ "resource": "" }
q24693
_get_baseline_string_from_file
train
def _get_baseline_string_from_file(filename):  # pragma: no cover
    """Breaking this function up for mockability."""
    try:
        with open(filename) as f:
            return f.read()

    except IOError:
        log.error(
            'Unable to open baseline file: {}\n'
            'Please create it via\n'
            '   `detect-secrets scan > {}`\n'
            .format(filename, filename),
        )
        raise
python
{ "resource": "" }
q24694
raise_exception_if_baseline_file_is_unstaged
train
def raise_exception_if_baseline_file_is_unstaged(filename):
    """We want to make sure that if there are changes to the baseline
    file, they will be included in the commit. This way, we can keep
    our baselines up-to-date.

    :raises: ValueError
    """
    try:
        files_changed_but_not_staged = subprocess.check_output(
            [
                'git',
                'diff',
                '--name-only',
            ],
        ).split()
    except subprocess.CalledProcessError:
        # Since we don't pipe stderr, we get free logging through git.
        raise ValueError

    if filename.encode() in files_changed_but_not_staged:
        log.error((
            'Your baseline file ({}) is unstaged.\n'
            '`git add {}` to fix this.'
        ).format(
            filename,
            filename,
        ))

        raise ValueError
python
{ "resource": "" }
q24695
compare_baselines
train
def compare_baselines(old_baseline_filename, new_baseline_filename):
    """
    This function enables developers to more easily configure plugin settings,
    by comparing two generated baselines and highlighting their differences.

    For effective use, a few assumptions are made:
        1. Baselines are sorted by (filename, line_number, hash).
           This allows for a deterministic order, when doing a side-by-side
           comparison.

        2. Baselines are generated for the same codebase snapshot.
           This means that we won't have cases where secrets are moved around;
           only added or removed.

    NOTE: We don't want to do a version check, because we want to be able to
    use this functionality across versions (to see how the new version fares
    compared to the old one).
    """
    if old_baseline_filename == new_baseline_filename:
        raise RedundantComparisonError

    old_baseline = _get_baseline_from_file(old_baseline_filename)
    new_baseline = _get_baseline_from_file(new_baseline_filename)

    _remove_nonexistent_files_from_baseline(old_baseline)
    _remove_nonexistent_files_from_baseline(new_baseline)

    # We aggregate the secrets first, so that we can display a total count.
    secrets_to_compare = _get_secrets_to_compare(old_baseline, new_baseline)
    total_reviews = len(secrets_to_compare)
    current_index = 0

    secret_iterator = BidirectionalIterator(secrets_to_compare)
    for filename, secret, is_removed in secret_iterator:
        _clear_screen()
        current_index += 1

        header = '{} {}'
        if is_removed:
            plugins_used = old_baseline['plugins_used']
            header = header.format(
                colorize('Status:', AnsiColor.BOLD),
                '>> {} <<'.format(
                    colorize('REMOVED', AnsiColor.RED),
                ),
            )
        else:
            plugins_used = new_baseline['plugins_used']
            header = header.format(
                colorize('Status:', AnsiColor.BOLD),
                '>> {} <<'.format(
                    colorize('ADDED', AnsiColor.LIGHT_GREEN),
                ),
            )

        try:
            _print_context(
                filename,
                secret,
                current_index,
                total_reviews,
                plugins_used,
                additional_header_lines=header,
                force=is_removed,
            )
            decision = _get_user_decision(
                can_step_back=secret_iterator.can_step_back(),
                prompt_secret_decision=False,
            )
        except SecretNotFoundOnSpecifiedLineError:
            decision = _get_user_decision(prompt_secret_decision=False)

        if decision == 'q':
            print('Quitting...')
            break

        if decision == 'b':  # pragma: no cover
            current_index -= 2
            secret_iterator.step_back_on_next_iteration()
python
{ "resource": "" }
q24696
_secret_generator
train
def _secret_generator(baseline):
    """Generates secrets to audit, from the baseline"""
    for filename, secrets in baseline['results'].items():
        for secret in secrets:
            yield filename, secret
python
{ "resource": "" }
q24697
_get_secret_with_context
train
def _get_secret_with_context(
    filename,
    secret,
    plugin_settings,
    lines_of_context=5,
    force=False,
):
    """
    Displays the secret, with surrounding lines of code for better context.

    :type filename: str
    :param filename: filename where secret resides in

    :type secret: dict, PotentialSecret.json() format
    :param secret: the secret listed in baseline

    :type plugin_settings: list
    :param plugin_settings: plugins used to create baseline.

    :type lines_of_context: int
    :param lines_of_context: number of lines displayed before and after
        secret.

    :type force: bool
    :param force: if True, will print the lines of code even if it doesn't
        find the secret expected

    :raises: SecretNotFoundOnSpecifiedLineError
    """
    snippet = CodeSnippetHighlighter().get_code_snippet(
        filename,
        secret['line_number'],
        lines_of_context=lines_of_context,
    )

    try:
        raw_secret_value = get_raw_secret_value(
            snippet.target_line,
            secret,
            plugin_settings,
            filename,
        )

        snippet.highlight_line(raw_secret_value)
    except SecretNotFoundOnSpecifiedLineError:
        if not force:
            raise

        snippet.target_line = colorize(
            snippet.target_line,
            AnsiColor.BOLD,
        )

    return snippet.add_line_numbers()
python
{ "resource": "" }
q24698
raw_secret_generator
train
def raw_secret_generator(plugin, secret_line, filetype):
    """Generates raw secrets by re-scanning the line, with the specified plugin

    :type plugin: BasePlugin
    :type secret_line: str
    :type filetype: FileType
    """
    for raw_secret in plugin.secret_generator(secret_line, filetype=filetype):
        yield raw_secret

    if issubclass(plugin.__class__, HighEntropyStringsPlugin):
        with plugin.non_quoted_string_regex(strict=False):
            for raw_secret in plugin.secret_generator(secret_line):
                yield raw_secret
python
{ "resource": "" }
q24699
PluginOptions.consolidate_args
train
def consolidate_args(args):
    """There are many argument fields related to configuring plugins.
    This function consolidates all of them, and saves the consolidated
    information in args.plugins.

    Note that we're deferring initialization of those plugins, because
    plugins may have various initialization values, referenced in
    different places.

    :param args: output of `argparse.ArgumentParser.parse_args`
    """
    # Using `--hex-limit` as a canary to identify whether this
    # consolidation is appropriate.
    if not hasattr(args, 'hex_limit'):
        return

    active_plugins = {}
    is_using_default_value = {}

    for plugin in PluginOptions.all_plugins:
        arg_name = PluginOptions._convert_flag_text_to_argument_name(
            plugin.disable_flag_text,
        )

        # Remove disabled plugins
        is_disabled = getattr(args, arg_name, False)
        delattr(args, arg_name)
        if is_disabled:
            continue

        # Consolidate related args
        related_args = {}
        for related_arg_tuple in plugin.related_args:
            try:
                flag_name, default_value = related_arg_tuple
            except ValueError:
                flag_name = related_arg_tuple
                default_value = None

            arg_name = PluginOptions._convert_flag_text_to_argument_name(
                flag_name,
            )

            related_args[arg_name] = getattr(args, arg_name)
            delattr(args, arg_name)

            if default_value and related_args[arg_name] is None:
                related_args[arg_name] = default_value
                is_using_default_value[arg_name] = True

        active_plugins.update({
            plugin.classname: related_args,
        })

    args.plugins = active_plugins
    args.is_using_default_value = is_using_default_value
python
{ "resource": "" }