Dataset schema (column name, dtype, observed value/length range):

id                int32    values 0 to 252k
repo              string   lengths 7 to 55
path              string   lengths 4 to 127
func_name         string   lengths 1 to 88
original_string   string   lengths 75 to 19.8k
language          string   1 distinct value (python)
code              string   lengths 75 to 19.8k
code_tokens       list
docstring         string   lengths 3 to 17.3k
docstring_tokens  list
sha               string   lengths 40 to 40
url               string   lengths 87 to 242
233,100
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
get_superclasses_from_class_definition
def get_superclasses_from_class_definition(class_definition):
    """Extract a list of all superclass names from a class definition dict."""
    # New-style superclasses definition, supporting multiple-inheritance.
    superclasses = class_definition.get('superClasses', None)
    if superclasses:
        return list(superclasses)

    # Old-style superclass definition, single inheritance only.
    superclass = class_definition.get('superClass', None)
    if superclass:
        return [superclass]

    # No superclasses are present.
    return []
python
def get_superclasses_from_class_definition(class_definition):
    """Extract a list of all superclass names from a class definition dict."""
    # New-style superclasses definition, supporting multiple-inheritance.
    superclasses = class_definition.get('superClasses', None)
    if superclasses:
        return list(superclasses)

    # Old-style superclass definition, single inheritance only.
    superclass = class_definition.get('superClass', None)
    if superclass:
        return [superclass]

    # No superclasses are present.
    return []
[ "def", "get_superclasses_from_class_definition", "(", "class_definition", ")", ":", "# New-style superclasses definition, supporting multiple-inheritance.", "superclasses", "=", "class_definition", ".", "get", "(", "'superClasses'", ",", "None", ")", "if", "superclasses", ":", "return", "list", "(", "superclasses", ")", "# Old-style superclass definition, single inheritance only.", "superclass", "=", "class_definition", ".", "get", "(", "'superClass'", ",", "None", ")", "if", "superclass", ":", "return", "[", "superclass", "]", "# No superclasses are present.", "return", "[", "]" ]
Extract a list of all superclass names from a class definition dict.
[ "Extract", "a", "list", "of", "all", "superclass", "names", "from", "a", "class", "definition", "dict", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L74-L88
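A brief usage sketch for the record above; the class-definition dicts are hypothetical, and the function itself is assumed to be in scope:

new_style = {'superClasses': ['V', 'NamedEntity']}   # new-style, multiple inheritance
old_style = {'superClass': 'V'}                      # old-style, single inheritance
no_parents = {'name': 'SomeClass'}                   # neither key present

assert get_superclasses_from_class_definition(new_style) == ['V', 'NamedEntity']
assert get_superclasses_from_class_definition(old_style) == ['V']
assert get_superclasses_from_class_definition(no_parents) == []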
233,101
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaElement.freeze
def freeze(self):
    """Make the SchemaElement's connections immutable."""
    self.in_connections = frozenset(self.in_connections)
    self.out_connections = frozenset(self.out_connections)
python
def freeze(self):
    """Make the SchemaElement's connections immutable."""
    self.in_connections = frozenset(self.in_connections)
    self.out_connections = frozenset(self.out_connections)
[ "def", "freeze", "(", "self", ")", ":", "self", ".", "in_connections", "=", "frozenset", "(", "self", ".", "in_connections", ")", "self", ".", "out_connections", "=", "frozenset", "(", "self", ".", "out_connections", ")" ]
Make the SchemaElement's connections immutable.
[ "Make", "the", "SchemaElement", "s", "connections", "immutable", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L180-L183
233,102
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.get_default_property_values
def get_default_property_values(self, classname):
    """Return a dict with default values for all properties declared on this class."""
    schema_element = self.get_element_by_class_name(classname)

    result = {
        property_name: property_descriptor.default
        for property_name, property_descriptor in six.iteritems(schema_element.properties)
    }

    if schema_element.is_edge:
        # Remove the source/destination properties for edges, if they exist.
        result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
        result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)

    return result
python
def get_default_property_values(self, classname):
    """Return a dict with default values for all properties declared on this class."""
    schema_element = self.get_element_by_class_name(classname)

    result = {
        property_name: property_descriptor.default
        for property_name, property_descriptor in six.iteritems(schema_element.properties)
    }

    if schema_element.is_edge:
        # Remove the source/destination properties for edges, if they exist.
        result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
        result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)

    return result
[ "def", "get_default_property_values", "(", "self", ",", "classname", ")", ":", "schema_element", "=", "self", ".", "get_element_by_class_name", "(", "classname", ")", "result", "=", "{", "property_name", ":", "property_descriptor", ".", "default", "for", "property_name", ",", "property_descriptor", "in", "six", ".", "iteritems", "(", "schema_element", ".", "properties", ")", "}", "if", "schema_element", ".", "is_edge", ":", "# Remove the source/destination properties for edges, if they exist.", "result", ".", "pop", "(", "EDGE_SOURCE_PROPERTY_NAME", ",", "None", ")", "result", ".", "pop", "(", "EDGE_DESTINATION_PROPERTY_NAME", ",", "None", ")", "return", "result" ]
Return a dict with default values for all properties declared on this class.
[ "Return", "a", "dict", "with", "default", "values", "for", "all", "properties", "declared", "on", "this", "class", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L297-L311
233,103
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph._get_property_values_with_defaults
def _get_property_values_with_defaults(self, classname, property_values):
    """Return the property values for the class, with default values applied where needed."""
    # To uphold OrientDB semantics, make a new dict with all property values set
    # to their default values, which are None if no default was set.
    # Then, overwrite its data with the supplied property values.
    final_values = self.get_default_property_values(classname)
    final_values.update(property_values)
    return final_values
python
def _get_property_values_with_defaults(self, classname, property_values):
    """Return the property values for the class, with default values applied where needed."""
    # To uphold OrientDB semantics, make a new dict with all property values set
    # to their default values, which are None if no default was set.
    # Then, overwrite its data with the supplied property values.
    final_values = self.get_default_property_values(classname)
    final_values.update(property_values)
    return final_values
[ "def", "_get_property_values_with_defaults", "(", "self", ",", "classname", ",", "property_values", ")", ":", "# To uphold OrientDB semantics, make a new dict with all property values set", "# to their default values, which are None if no default was set.", "# Then, overwrite its data with the supplied property values.", "final_values", "=", "self", ".", "get_default_property_values", "(", "classname", ")", "final_values", ".", "update", "(", "property_values", ")", "return", "final_values" ]
Return the property values for the class, with default values applied where needed.
[ "Return", "the", "property", "values", "for", "the", "class", "with", "default", "values", "applied", "where", "needed", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L313-L320
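The defaults-then-overwrite pattern in _get_property_values_with_defaults can be shown standalone; the property names and values below are hypothetical:

defaults = {'name': None, 'alias': None, 'uuid': None}  # per-class defaults, None if unset
supplied = {'name': 'Alice'}

final_values = dict(defaults)  # copy, so the defaults dict itself is not mutated
final_values.update(supplied)  # supplied values overwrite the defaults

assert final_values == {'name': 'Alice', 'alias': None, 'uuid': None}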
233,104
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.get_element_by_class_name_or_raise
def get_element_by_class_name_or_raise(self, class_name):
    """Return the SchemaElement for the specified class name, asserting that it exists."""
    if class_name not in self._elements:
        raise InvalidClassError(u'Class does not exist: {}'.format(class_name))

    return self._elements[class_name]
python
def get_element_by_class_name_or_raise(self, class_name):
    """Return the SchemaElement for the specified class name, asserting that it exists."""
    if class_name not in self._elements:
        raise InvalidClassError(u'Class does not exist: {}'.format(class_name))

    return self._elements[class_name]
[ "def", "get_element_by_class_name_or_raise", "(", "self", ",", "class_name", ")", ":", "if", "class_name", "not", "in", "self", ".", "_elements", ":", "raise", "InvalidClassError", "(", "u'Class does not exist: {}'", ".", "format", "(", "class_name", ")", ")", "return", "self", ".", "_elements", "[", "class_name", "]" ]
Return the SchemaElement for the specified class name, asserting that it exists.
[ "Return", "the", "SchemaElement", "for", "the", "specified", "class", "name", "asserting", "that", "it", "exists", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L322-L327
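A minimal, self-contained sketch of the lookup-or-raise guard above; the _elements registry and class names here are hypothetical stand-ins:

class InvalidClassError(Exception):
    pass

_elements = {'Person': object()}  # stand-in for the SchemaGraph's element registry

def get_element_by_class_name_or_raise(class_name):
    if class_name not in _elements:
        raise InvalidClassError(u'Class does not exist: {}'.format(class_name))
    return _elements[class_name]

get_element_by_class_name_or_raise('Person')  # returns the stored element
try:
    get_element_by_class_name_or_raise('Animal')
except InvalidClassError as error:
    print(error)  # -> Class does not exist: Animal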
233,105
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.get_vertex_schema_element_or_raise
def get_vertex_schema_element_or_raise(self, vertex_classname):
    """Return the schema element with the given name, asserting that it's of vertex type."""
    schema_element = self.get_element_by_class_name_or_raise(vertex_classname)

    if not schema_element.is_vertex:
        raise InvalidClassError(u'Non-vertex class provided: {}'.format(vertex_classname))

    return schema_element
python
def get_vertex_schema_element_or_raise(self, vertex_classname):
    """Return the schema element with the given name, asserting that it's of vertex type."""
    schema_element = self.get_element_by_class_name_or_raise(vertex_classname)

    if not schema_element.is_vertex:
        raise InvalidClassError(u'Non-vertex class provided: {}'.format(vertex_classname))

    return schema_element
[ "def", "get_vertex_schema_element_or_raise", "(", "self", ",", "vertex_classname", ")", ":", "schema_element", "=", "self", ".", "get_element_by_class_name_or_raise", "(", "vertex_classname", ")", "if", "not", "schema_element", ".", "is_vertex", ":", "raise", "InvalidClassError", "(", "u'Non-vertex class provided: {}'", ".", "format", "(", "vertex_classname", ")", ")", "return", "schema_element" ]
Return the schema element with the given name, asserting that it's of vertex type.
[ "Return", "the", "schema", "element", "with", "the", "given", "name", "asserting", "that", "it", "s", "of", "vertex", "type", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L329-L336
233,106
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.get_edge_schema_element_or_raise
def get_edge_schema_element_or_raise(self, edge_classname):
    """Return the schema element with the given name, asserting that it's of edge type."""
    schema_element = self.get_element_by_class_name_or_raise(edge_classname)

    if not schema_element.is_edge:
        raise InvalidClassError(u'Non-edge class provided: {}'.format(edge_classname))

    return schema_element
python
def get_edge_schema_element_or_raise(self, edge_classname):
    """Return the schema element with the given name, asserting that it's of edge type."""
    schema_element = self.get_element_by_class_name_or_raise(edge_classname)

    if not schema_element.is_edge:
        raise InvalidClassError(u'Non-edge class provided: {}'.format(edge_classname))

    return schema_element
[ "def", "get_edge_schema_element_or_raise", "(", "self", ",", "edge_classname", ")", ":", "schema_element", "=", "self", ".", "get_element_by_class_name_or_raise", "(", "edge_classname", ")", "if", "not", "schema_element", ".", "is_edge", ":", "raise", "InvalidClassError", "(", "u'Non-edge class provided: {}'", ".", "format", "(", "edge_classname", ")", ")", "return", "schema_element" ]
Return the schema element with the given name, asserting that it's of edge type.
[ "Return", "the", "schema", "element", "with", "the", "given", "name", "asserting", "that", "it", "s", "of", "edge", "type", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L338-L345
233,107
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.validate_is_non_abstract_vertex_type
def validate_is_non_abstract_vertex_type(self, vertex_classname):
    """Validate that a vertex classname corresponds to a non-abstract vertex class."""
    element = self.get_vertex_schema_element_or_raise(vertex_classname)

    if element.abstract:
        raise InvalidClassError(u'Expected a non-abstract vertex class, but {} is abstract'
                                .format(vertex_classname))
python
def validate_is_non_abstract_vertex_type(self, vertex_classname):
    """Validate that a vertex classname corresponds to a non-abstract vertex class."""
    element = self.get_vertex_schema_element_or_raise(vertex_classname)

    if element.abstract:
        raise InvalidClassError(u'Expected a non-abstract vertex class, but {} is abstract'
                                .format(vertex_classname))
[ "def", "validate_is_non_abstract_vertex_type", "(", "self", ",", "vertex_classname", ")", ":", "element", "=", "self", ".", "get_vertex_schema_element_or_raise", "(", "vertex_classname", ")", "if", "element", ".", "abstract", ":", "raise", "InvalidClassError", "(", "u'Expected a non-abstract vertex class, but {} is abstract'", ".", "format", "(", "vertex_classname", ")", ")" ]
Validate that a vertex classname corresponds to a non-abstract vertex class.
[ "Validate", "that", "a", "vertex", "classname", "corresponds", "to", "a", "non", "-", "abstract", "vertex", "class", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L355-L361
233,108
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.validate_is_non_abstract_edge_type
def validate_is_non_abstract_edge_type(self, edge_classname):
    """Validate that an edge classname corresponds to a non-abstract edge class."""
    element = self.get_edge_schema_element_or_raise(edge_classname)

    if element.abstract:
        raise InvalidClassError(u'Expected a non-abstract edge class, but {} is abstract'
                                .format(edge_classname))
python
def validate_is_non_abstract_edge_type(self, edge_classname):
    """Validate that an edge classname corresponds to a non-abstract edge class."""
    element = self.get_edge_schema_element_or_raise(edge_classname)

    if element.abstract:
        raise InvalidClassError(u'Expected a non-abstract edge class, but {} is abstract'
                                .format(edge_classname))
[ "def", "validate_is_non_abstract_edge_type", "(", "self", ",", "edge_classname", ")", ":", "element", "=", "self", ".", "get_edge_schema_element_or_raise", "(", "edge_classname", ")", "if", "element", ".", "abstract", ":", "raise", "InvalidClassError", "(", "u'Expected a non-abstract vertex class, but {} is abstract'", ".", "format", "(", "edge_classname", ")", ")" ]
Validate that an edge classname corresponds to a non-abstract edge class.
[ "Validate", "that", "an", "edge", "classname", "corresponds", "to", "a", "non", "-", "abstract", "edge", "class", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L363-L369
233,109
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph.validate_properties_exist
def validate_properties_exist(self, classname, property_names):
    """Validate that the specified property names are indeed defined on the given class."""
    schema_element = self.get_element_by_class_name(classname)

    requested_properties = set(property_names)
    available_properties = set(schema_element.properties.keys())
    non_existent_properties = requested_properties - available_properties
    if non_existent_properties:
        raise InvalidPropertyError(
            u'Class "{}" does not have definitions for properties "{}": '
            u'{}'.format(classname, non_existent_properties, property_names))
python
def validate_properties_exist(self, classname, property_names):
    """Validate that the specified property names are indeed defined on the given class."""
    schema_element = self.get_element_by_class_name(classname)

    requested_properties = set(property_names)
    available_properties = set(schema_element.properties.keys())
    non_existent_properties = requested_properties - available_properties
    if non_existent_properties:
        raise InvalidPropertyError(
            u'Class "{}" does not have definitions for properties "{}": '
            u'{}'.format(classname, non_existent_properties, property_names))
[ "def", "validate_properties_exist", "(", "self", ",", "classname", ",", "property_names", ")", ":", "schema_element", "=", "self", ".", "get_element_by_class_name", "(", "classname", ")", "requested_properties", "=", "set", "(", "property_names", ")", "available_properties", "=", "set", "(", "schema_element", ".", "properties", ".", "keys", "(", ")", ")", "non_existent_properties", "=", "requested_properties", "-", "available_properties", "if", "non_existent_properties", ":", "raise", "InvalidPropertyError", "(", "u'Class \"{}\" does not have definitions for properties \"{}\": '", "u'{}'", ".", "format", "(", "classname", ",", "non_existent_properties", ",", "property_names", ")", ")" ]
Validate that the specified property names are indeed defined on the given class.
[ "Validate", "that", "the", "specified", "property", "names", "are", "indeed", "defined", "on", "the", "given", "class", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L371-L381
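The check in validate_properties_exist reduces to a set difference; a standalone sketch with hypothetical property names:

requested_properties = {'name', 'birthday'}
available_properties = {'name', 'uuid'}

# Anything requested but not available would trigger InvalidPropertyError.
non_existent_properties = requested_properties - available_properties
assert non_existent_properties == {'birthday'}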
233,110
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph._split_classes_by_kind
def _split_classes_by_kind(self, class_name_to_definition):
    """Assign each class to the vertex, edge or non-graph type sets based on its kind."""
    for class_name in class_name_to_definition:
        inheritance_set = self._inheritance_sets[class_name]

        is_vertex = ORIENTDB_BASE_VERTEX_CLASS_NAME in inheritance_set
        is_edge = ORIENTDB_BASE_EDGE_CLASS_NAME in inheritance_set

        if is_vertex and is_edge:
            raise AssertionError(u'Class {} appears to be both a vertex and an edge class: '
                                 u'{}'.format(class_name, inheritance_set))
        elif is_vertex:
            self._vertex_class_names.add(class_name)
        elif is_edge:
            self._edge_class_names.add(class_name)
        else:
            self._non_graph_class_names.add(class_name)

    # Freeze the classname sets so they cannot be modified again.
    self._vertex_class_names = frozenset(self._vertex_class_names)
    self._edge_class_names = frozenset(self._edge_class_names)
    self._non_graph_class_names = frozenset(self._non_graph_class_names)
python
def _split_classes_by_kind(self, class_name_to_definition):
    """Assign each class to the vertex, edge or non-graph type sets based on its kind."""
    for class_name in class_name_to_definition:
        inheritance_set = self._inheritance_sets[class_name]

        is_vertex = ORIENTDB_BASE_VERTEX_CLASS_NAME in inheritance_set
        is_edge = ORIENTDB_BASE_EDGE_CLASS_NAME in inheritance_set

        if is_vertex and is_edge:
            raise AssertionError(u'Class {} appears to be both a vertex and an edge class: '
                                 u'{}'.format(class_name, inheritance_set))
        elif is_vertex:
            self._vertex_class_names.add(class_name)
        elif is_edge:
            self._edge_class_names.add(class_name)
        else:
            self._non_graph_class_names.add(class_name)

    # Freeze the classname sets so they cannot be modified again.
    self._vertex_class_names = frozenset(self._vertex_class_names)
    self._edge_class_names = frozenset(self._edge_class_names)
    self._non_graph_class_names = frozenset(self._non_graph_class_names)
[ "def", "_split_classes_by_kind", "(", "self", ",", "class_name_to_definition", ")", ":", "for", "class_name", "in", "class_name_to_definition", ":", "inheritance_set", "=", "self", ".", "_inheritance_sets", "[", "class_name", "]", "is_vertex", "=", "ORIENTDB_BASE_VERTEX_CLASS_NAME", "in", "inheritance_set", "is_edge", "=", "ORIENTDB_BASE_EDGE_CLASS_NAME", "in", "inheritance_set", "if", "is_vertex", "and", "is_edge", ":", "raise", "AssertionError", "(", "u'Class {} appears to be both a vertex and an edge class: '", "u'{}'", ".", "format", "(", "class_name", ",", "inheritance_set", ")", ")", "elif", "is_vertex", ":", "self", ".", "_vertex_class_names", ".", "add", "(", "class_name", ")", "elif", "is_edge", ":", "self", ".", "_edge_class_names", ".", "add", "(", "class_name", ")", "else", ":", "self", ".", "_non_graph_class_names", ".", "add", "(", "class_name", ")", "# Freeze the classname sets so they cannot be modified again.", "self", ".", "_vertex_class_names", "=", "frozenset", "(", "self", ".", "_vertex_class_names", ")", "self", ".", "_edge_class_names", "=", "frozenset", "(", "self", ".", "_edge_class_names", ")", "self", ".", "_non_graph_class_names", "=", "frozenset", "(", "self", ".", "_non_graph_class_names", ")" ]
Assign each class to the vertex, edge or non-graph type sets based on its kind.
[ "Assign", "each", "class", "to", "the", "vertex", "edge", "or", "non", "-", "graph", "type", "sets", "based", "on", "its", "kind", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L440-L461
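The kind-splitting above can be exercised in isolation; the constants and inheritance sets below are hypothetical stand-ins for OrientDB schema metadata:

ORIENTDB_BASE_VERTEX_CLASS_NAME = 'V'
ORIENTDB_BASE_EDGE_CLASS_NAME = 'E'

inheritance_sets = {
    'Person': {'Person', 'V'},
    'Person_Knows': {'Person_Knows', 'E'},
    'Address': {'Address'},  # embedded non-graph class: inherits neither V nor E
}

vertex_names, edge_names, non_graph_names = set(), set(), set()
for class_name, inheritance_set in inheritance_sets.items():
    if ORIENTDB_BASE_VERTEX_CLASS_NAME in inheritance_set:
        vertex_names.add(class_name)
    elif ORIENTDB_BASE_EDGE_CLASS_NAME in inheritance_set:
        edge_names.add(class_name)
    else:
        non_graph_names.add(class_name)

assert vertex_names == {'Person'}
assert edge_names == {'Person_Knows'}
assert non_graph_names == {'Address'}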
233,111
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph._create_descriptor_from_property_definition
def _create_descriptor_from_property_definition(self, class_name, property_definition,
                                                class_name_to_definition):
    """Return a PropertyDescriptor corresponding to the given OrientDB property definition."""
    name = property_definition['name']
    type_id = property_definition['type']
    linked_class = property_definition.get('linkedClass', None)
    linked_type = property_definition.get('linkedType', None)
    qualifier = None

    validate_supported_property_type_id(name, type_id)

    if type_id == PROPERTY_TYPE_LINK_ID:
        if class_name not in self._edge_class_names:
            raise AssertionError(u'Found a property of type Link on a non-edge class: '
                                 u'{} {}'.format(name, class_name))

        if name not in {EDGE_SOURCE_PROPERTY_NAME, EDGE_DESTINATION_PROPERTY_NAME}:
            raise AssertionError(u'Found a property of type Link with an unexpected name: '
                                 u'{} {}'.format(name, class_name))

        if linked_class is None:
            raise AssertionError(u'Property "{}" is declared with type Link but has no '
                                 u'linked class: {}'.format(name, property_definition))

        if linked_class not in self._vertex_class_names:
            is_linked_class_abstract = class_name_to_definition[linked_class]['abstract']
            all_subclasses_are_vertices = True
            for subclass in self._subclass_sets[linked_class]:
                if subclass != linked_class and subclass not in self.vertex_class_names:
                    all_subclasses_are_vertices = False
                    break
            if not (is_linked_class_abstract and all_subclasses_are_vertices):
                raise AssertionError(u'Property "{}" is declared as a Link to class {}, but '
                                     u'that class is neither a vertex nor is it an '
                                     u'abstract class whose subclasses are all vertices!'
                                     .format(name, linked_class))

        qualifier = linked_class
    elif type_id in COLLECTION_PROPERTY_TYPES:
        if linked_class is not None and linked_type is not None:
            raise AssertionError(u'Property "{}" unexpectedly has both a linked class and '
                                 u'a linked type: {}'.format(name, property_definition))
        elif linked_type is not None and linked_class is None:
            # No linked class, must be a linked native OrientDB type.
            validate_supported_property_type_id(name + ' inner type', linked_type)
            qualifier = linked_type
        elif linked_class is not None and linked_type is None:
            # No linked type, must be a linked non-graph user-defined type.
            if linked_class not in self._non_graph_class_names:
                raise AssertionError(u'Property "{}" is declared as the inner type of '
                                     u'an embedded collection, but is not a non-graph class: '
                                     u'{}'.format(name, linked_class))
            qualifier = linked_class
        else:
            raise AssertionError(u'Property "{}" is an embedded collection but has '
                                 u'neither a linked class nor a linked type: '
                                 u'{}'.format(name, property_definition))

    default_value = None
    default_value_string = property_definition.get('defaultValue', None)
    if default_value_string is not None:
        default_value = parse_default_property_value(name, type_id, default_value_string)

    descriptor = PropertyDescriptor(type_id=type_id, qualifier=qualifier, default=default_value)

    # Sanity-check the descriptor before returning it.
    _validate_collections_have_default_values(class_name, name, descriptor)

    return descriptor
python
def _create_descriptor_from_property_definition(self, class_name, property_definition,
                                                class_name_to_definition):
    """Return a PropertyDescriptor corresponding to the given OrientDB property definition."""
    name = property_definition['name']
    type_id = property_definition['type']
    linked_class = property_definition.get('linkedClass', None)
    linked_type = property_definition.get('linkedType', None)
    qualifier = None

    validate_supported_property_type_id(name, type_id)

    if type_id == PROPERTY_TYPE_LINK_ID:
        if class_name not in self._edge_class_names:
            raise AssertionError(u'Found a property of type Link on a non-edge class: '
                                 u'{} {}'.format(name, class_name))

        if name not in {EDGE_SOURCE_PROPERTY_NAME, EDGE_DESTINATION_PROPERTY_NAME}:
            raise AssertionError(u'Found a property of type Link with an unexpected name: '
                                 u'{} {}'.format(name, class_name))

        if linked_class is None:
            raise AssertionError(u'Property "{}" is declared with type Link but has no '
                                 u'linked class: {}'.format(name, property_definition))

        if linked_class not in self._vertex_class_names:
            is_linked_class_abstract = class_name_to_definition[linked_class]['abstract']
            all_subclasses_are_vertices = True
            for subclass in self._subclass_sets[linked_class]:
                if subclass != linked_class and subclass not in self.vertex_class_names:
                    all_subclasses_are_vertices = False
                    break
            if not (is_linked_class_abstract and all_subclasses_are_vertices):
                raise AssertionError(u'Property "{}" is declared as a Link to class {}, but '
                                     u'that class is neither a vertex nor is it an '
                                     u'abstract class whose subclasses are all vertices!'
                                     .format(name, linked_class))

        qualifier = linked_class
    elif type_id in COLLECTION_PROPERTY_TYPES:
        if linked_class is not None and linked_type is not None:
            raise AssertionError(u'Property "{}" unexpectedly has both a linked class and '
                                 u'a linked type: {}'.format(name, property_definition))
        elif linked_type is not None and linked_class is None:
            # No linked class, must be a linked native OrientDB type.
            validate_supported_property_type_id(name + ' inner type', linked_type)
            qualifier = linked_type
        elif linked_class is not None and linked_type is None:
            # No linked type, must be a linked non-graph user-defined type.
            if linked_class not in self._non_graph_class_names:
                raise AssertionError(u'Property "{}" is declared as the inner type of '
                                     u'an embedded collection, but is not a non-graph class: '
                                     u'{}'.format(name, linked_class))
            qualifier = linked_class
        else:
            raise AssertionError(u'Property "{}" is an embedded collection but has '
                                 u'neither a linked class nor a linked type: '
                                 u'{}'.format(name, property_definition))

    default_value = None
    default_value_string = property_definition.get('defaultValue', None)
    if default_value_string is not None:
        default_value = parse_default_property_value(name, type_id, default_value_string)

    descriptor = PropertyDescriptor(type_id=type_id, qualifier=qualifier, default=default_value)

    # Sanity-check the descriptor before returning it.
    _validate_collections_have_default_values(class_name, name, descriptor)

    return descriptor
[ "def", "_create_descriptor_from_property_definition", "(", "self", ",", "class_name", ",", "property_definition", ",", "class_name_to_definition", ")", ":", "name", "=", "property_definition", "[", "'name'", "]", "type_id", "=", "property_definition", "[", "'type'", "]", "linked_class", "=", "property_definition", ".", "get", "(", "'linkedClass'", ",", "None", ")", "linked_type", "=", "property_definition", ".", "get", "(", "'linkedType'", ",", "None", ")", "qualifier", "=", "None", "validate_supported_property_type_id", "(", "name", ",", "type_id", ")", "if", "type_id", "==", "PROPERTY_TYPE_LINK_ID", ":", "if", "class_name", "not", "in", "self", ".", "_edge_class_names", ":", "raise", "AssertionError", "(", "u'Found a property of type Link on a non-edge class: '", "u'{} {}'", ".", "format", "(", "name", ",", "class_name", ")", ")", "if", "name", "not", "in", "{", "EDGE_SOURCE_PROPERTY_NAME", ",", "EDGE_DESTINATION_PROPERTY_NAME", "}", ":", "raise", "AssertionError", "(", "u'Found a property of type Link with an unexpected name: '", "u'{} {}'", ".", "format", "(", "name", ",", "class_name", ")", ")", "if", "linked_class", "is", "None", ":", "raise", "AssertionError", "(", "u'Property \"{}\" is declared with type Link but has no '", "u'linked class: {}'", ".", "format", "(", "name", ",", "property_definition", ")", ")", "if", "linked_class", "not", "in", "self", ".", "_vertex_class_names", ":", "is_linked_class_abstract", "=", "class_name_to_definition", "[", "linked_class", "]", "[", "'abstract'", "]", "all_subclasses_are_vertices", "=", "True", "for", "subclass", "in", "self", ".", "_subclass_sets", "[", "linked_class", "]", ":", "if", "subclass", "!=", "linked_class", "and", "subclass", "not", "in", "self", ".", "vertex_class_names", ":", "all_subclasses_are_vertices", "=", "False", "break", "if", "not", "(", "is_linked_class_abstract", "and", "all_subclasses_are_vertices", ")", ":", "raise", "AssertionError", "(", "u'Property \"{}\" is declared as a Link to class {}, but '", "u'that class is neither a vertex nor is it an '", "u'abstract class whose subclasses are all vertices!'", ".", "format", "(", "name", ",", "linked_class", ")", ")", "qualifier", "=", "linked_class", "elif", "type_id", "in", "COLLECTION_PROPERTY_TYPES", ":", "if", "linked_class", "is", "not", "None", "and", "linked_type", "is", "not", "None", ":", "raise", "AssertionError", "(", "u'Property \"{}\" unexpectedly has both a linked class and '", "u'a linked type: {}'", ".", "format", "(", "name", ",", "property_definition", ")", ")", "elif", "linked_type", "is", "not", "None", "and", "linked_class", "is", "None", ":", "# No linked class, must be a linked native OrientDB type.", "validate_supported_property_type_id", "(", "name", "+", "' inner type'", ",", "linked_type", ")", "qualifier", "=", "linked_type", "elif", "linked_class", "is", "not", "None", "and", "linked_type", "is", "None", ":", "# No linked type, must be a linked non-graph user-defined type.", "if", "linked_class", "not", "in", "self", ".", "_non_graph_class_names", ":", "raise", "AssertionError", "(", "u'Property \"{}\" is declared as the inner type of '", "u'an embedded collection, but is not a non-graph class: '", "u'{}'", ".", "format", "(", "name", ",", "linked_class", ")", ")", "qualifier", "=", "linked_class", "else", ":", "raise", "AssertionError", "(", "u'Property \"{}\" is an embedded collection but has '", "u'neither a linked class nor a linked type: '", "u'{}'", ".", "format", "(", "name", ",", "property_definition", ")", ")", "default_value", "=", "None", "default_value_string", "=", "property_definition", ".", "get", "(", "'defaultValue'", ",", "None", ")", "if", "default_value_string", "is", "not", "None", ":", "default_value", "=", "parse_default_property_value", "(", "name", ",", "type_id", ",", "default_value_string", ")", "descriptor", "=", "PropertyDescriptor", "(", "type_id", "=", "type_id", ",", "qualifier", "=", "qualifier", ",", "default", "=", "default_value", ")", "# Sanity-check the descriptor before returning it.", "_validate_collections_have_default_values", "(", "class_name", ",", "name", ",", "descriptor", ")", "return", "descriptor" ]
Return a PropertyDescriptor corresponding to the given OrientDB property definition.
[ "Return", "a", "PropertyDescriptor", "corresponding", "to", "the", "given", "OrientDB", "property", "definition", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L548-L616
233,112
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
SchemaGraph._link_vertex_and_edge_types
def _link_vertex_and_edge_types(self):
    """For each edge, link it to the vertex types it connects to each other."""
    for edge_class_name in self._edge_class_names:
        edge_element = self._elements[edge_class_name]

        if (EDGE_SOURCE_PROPERTY_NAME not in edge_element.properties or
                EDGE_DESTINATION_PROPERTY_NAME not in edge_element.properties):
            if edge_element.abstract:
                continue
            else:
                raise AssertionError(u'Found a non-abstract edge class with undefined '
                                     u'endpoint types: {}'.format(edge_element))

        from_class_name = edge_element.properties[EDGE_SOURCE_PROPERTY_NAME].qualifier
        to_class_name = edge_element.properties[EDGE_DESTINATION_PROPERTY_NAME].qualifier

        edge_schema_element = self._elements[edge_class_name]

        # Link from_class_name with edge_class_name
        for from_class in self._subclass_sets[from_class_name]:
            from_schema_element = self._elements[from_class]
            from_schema_element.out_connections.add(edge_class_name)
            edge_schema_element.in_connections.add(from_class)

        # Link edge_class_name with to_class_name
        for to_class in self._subclass_sets[to_class_name]:
            to_schema_element = self._elements[to_class]
            edge_schema_element.out_connections.add(to_class)
            to_schema_element.in_connections.add(edge_class_name)
python
def _link_vertex_and_edge_types(self):
    """For each edge, link it to the vertex types it connects to each other."""
    for edge_class_name in self._edge_class_names:
        edge_element = self._elements[edge_class_name]

        if (EDGE_SOURCE_PROPERTY_NAME not in edge_element.properties or
                EDGE_DESTINATION_PROPERTY_NAME not in edge_element.properties):
            if edge_element.abstract:
                continue
            else:
                raise AssertionError(u'Found a non-abstract edge class with undefined '
                                     u'endpoint types: {}'.format(edge_element))

        from_class_name = edge_element.properties[EDGE_SOURCE_PROPERTY_NAME].qualifier
        to_class_name = edge_element.properties[EDGE_DESTINATION_PROPERTY_NAME].qualifier

        edge_schema_element = self._elements[edge_class_name]

        # Link from_class_name with edge_class_name
        for from_class in self._subclass_sets[from_class_name]:
            from_schema_element = self._elements[from_class]
            from_schema_element.out_connections.add(edge_class_name)
            edge_schema_element.in_connections.add(from_class)

        # Link edge_class_name with to_class_name
        for to_class in self._subclass_sets[to_class_name]:
            to_schema_element = self._elements[to_class]
            edge_schema_element.out_connections.add(to_class)
            to_schema_element.in_connections.add(edge_class_name)
[ "def", "_link_vertex_and_edge_types", "(", "self", ")", ":", "for", "edge_class_name", "in", "self", ".", "_edge_class_names", ":", "edge_element", "=", "self", ".", "_elements", "[", "edge_class_name", "]", "if", "(", "EDGE_SOURCE_PROPERTY_NAME", "not", "in", "edge_element", ".", "properties", "or", "EDGE_DESTINATION_PROPERTY_NAME", "not", "in", "edge_element", ".", "properties", ")", ":", "if", "edge_element", ".", "abstract", ":", "continue", "else", ":", "raise", "AssertionError", "(", "u'Found a non-abstract edge class with undefined '", "u'endpoint types: {}'", ".", "format", "(", "edge_element", ")", ")", "from_class_name", "=", "edge_element", ".", "properties", "[", "EDGE_SOURCE_PROPERTY_NAME", "]", ".", "qualifier", "to_class_name", "=", "edge_element", ".", "properties", "[", "EDGE_DESTINATION_PROPERTY_NAME", "]", ".", "qualifier", "edge_schema_element", "=", "self", ".", "_elements", "[", "edge_class_name", "]", "# Link from_class_name with edge_class_name", "for", "from_class", "in", "self", ".", "_subclass_sets", "[", "from_class_name", "]", ":", "from_schema_element", "=", "self", ".", "_elements", "[", "from_class", "]", "from_schema_element", ".", "out_connections", ".", "add", "(", "edge_class_name", ")", "edge_schema_element", ".", "in_connections", ".", "add", "(", "from_class", ")", "# Link edge_class_name with to_class_name", "for", "to_class", "in", "self", ".", "_subclass_sets", "[", "to_class_name", "]", ":", "to_schema_element", "=", "self", ".", "_elements", "[", "to_class", "]", "edge_schema_element", ".", "out_connections", ".", "add", "(", "to_class", ")", "to_schema_element", ".", "in_connections", ".", "add", "(", "edge_class_name", ")" ]
For each edge, link it to the vertex types it connects to each other.
[ "For", "each", "edge", "link", "it", "to", "the", "vertex", "types", "it", "connects", "to", "each", "other", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L618-L646
233,113
kensho-technologies/graphql-compiler
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
_is_local_filter
def _is_local_filter(filter_block):
    """Return True if the Filter block references no non-local fields, and False otherwise."""
    # We need the "result" value of this function to be mutated within the "visitor_fn".
    # Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
    # https://www.python.org/dev/peps/pep-3104/
    # Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
    # can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
    # TODO(predrag): Revisit this if we drop support for Python 2.
    result = {
        'is_local_filter': True
    }

    filter_predicate = filter_block.predicate

    def visitor_fn(expression):
        """Expression visitor function that looks for uses of non-local fields."""
        non_local_expression_types = (ContextField, ContextFieldExistence)
        if isinstance(expression, non_local_expression_types):
            result['is_local_filter'] = False

        # Don't change the expression.
        return expression

    filter_predicate.visit_and_update(visitor_fn)

    return result['is_local_filter']
python
def _is_local_filter(filter_block):
    """Return True if the Filter block references no non-local fields, and False otherwise."""
    # We need the "result" value of this function to be mutated within the "visitor_fn".
    # Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
    # https://www.python.org/dev/peps/pep-3104/
    # Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
    # can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
    # TODO(predrag): Revisit this if we drop support for Python 2.
    result = {
        'is_local_filter': True
    }

    filter_predicate = filter_block.predicate

    def visitor_fn(expression):
        """Expression visitor function that looks for uses of non-local fields."""
        non_local_expression_types = (ContextField, ContextFieldExistence)
        if isinstance(expression, non_local_expression_types):
            result['is_local_filter'] = False

        # Don't change the expression.
        return expression

    filter_predicate.visit_and_update(visitor_fn)

    return result['is_local_filter']
[ "def", "_is_local_filter", "(", "filter_block", ")", ":", "# We need the \"result\" value of this function to be mutated within the \"visitor_fn\".", "# Since we support both Python 2 and Python 3, we can't use the \"nonlocal\" keyword here:", "# https://www.python.org/dev/peps/pep-3104/", "# Instead, we use a dict to store the value we need mutated, since the \"visitor_fn\"", "# can mutate state in the parent scope, but not rebind variables in it without \"nonlocal\".", "# TODO(predrag): Revisit this if we drop support for Python 2.", "result", "=", "{", "'is_local_filter'", ":", "True", "}", "filter_predicate", "=", "filter_block", ".", "predicate", "def", "visitor_fn", "(", "expression", ")", ":", "\"\"\"Expression visitor function that looks for uses of non-local fields.\"\"\"", "non_local_expression_types", "=", "(", "ContextField", ",", "ContextFieldExistence", ")", "if", "isinstance", "(", "expression", ",", "non_local_expression_types", ")", ":", "result", "[", "'is_local_filter'", "]", "=", "False", "# Don't change the expression.", "return", "expression", "filter_predicate", ".", "visit_and_update", "(", "visitor_fn", ")", "return", "result", "[", "'is_local_filter'", "]" ]
Return True if the Filter block references no non-local fields, and False otherwise.
[ "Return", "True", "if", "the", "Filter", "block", "references", "no", "non", "-", "local", "fields", "and", "False", "otherwise", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/workarounds/orientdb_query_execution.py#L53-L78
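The dict used in _is_local_filter is the standard Python 2/3-compatible substitute for nonlocal; a minimal standalone illustration of the same closure-mutation trick:

def count_matches(items, predicate):
    """Count items satisfying a predicate, mutating closed-over state via a dict."""
    # A one-entry dict acts as a mutable cell the nested function can update;
    # under Python 3 alone, "nonlocal" would make this workaround unnecessary.
    state = {'count': 0}

    def visitor(item):
        if predicate(item):
            state['count'] += 1  # mutating the dict works; rebinding a local would not
        return item

    for item in items:
        visitor(item)
    return state['count']

assert count_matches([1, 2, 3, 4], lambda x: x % 2 == 0) == 2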
233,114
kensho-technologies/graphql-compiler
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
_calculate_type_bound_at_step
def _calculate_type_bound_at_step(match_step):
    """Return the GraphQL type bound at the given step, or None if no bound is given."""
    current_type_bounds = []

    if isinstance(match_step.root_block, QueryRoot):
        # The QueryRoot start class is a type bound.
        current_type_bounds.extend(match_step.root_block.start_class)

    if match_step.coerce_type_block is not None:
        # The CoerceType target class is also a type bound.
        current_type_bounds.extend(match_step.coerce_type_block.target_class)

    if current_type_bounds:
        # A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
        return get_only_element_from_collection(current_type_bounds)
    else:
        # No type bound exists at this MATCH step.
        return None
python
def _calculate_type_bound_at_step(match_step):
    """Return the GraphQL type bound at the given step, or None if no bound is given."""
    current_type_bounds = []

    if isinstance(match_step.root_block, QueryRoot):
        # The QueryRoot start class is a type bound.
        current_type_bounds.extend(match_step.root_block.start_class)

    if match_step.coerce_type_block is not None:
        # The CoerceType target class is also a type bound.
        current_type_bounds.extend(match_step.coerce_type_block.target_class)

    if current_type_bounds:
        # A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
        return get_only_element_from_collection(current_type_bounds)
    else:
        # No type bound exists at this MATCH step.
        return None
[ "def", "_calculate_type_bound_at_step", "(", "match_step", ")", ":", "current_type_bounds", "=", "[", "]", "if", "isinstance", "(", "match_step", ".", "root_block", ",", "QueryRoot", ")", ":", "# The QueryRoot start class is a type bound.", "current_type_bounds", ".", "extend", "(", "match_step", ".", "root_block", ".", "start_class", ")", "if", "match_step", ".", "coerce_type_block", "is", "not", "None", ":", "# The CoerceType target class is also a type bound.", "current_type_bounds", ".", "extend", "(", "match_step", ".", "coerce_type_block", ".", "target_class", ")", "if", "current_type_bounds", ":", "# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.", "return", "get_only_element_from_collection", "(", "current_type_bounds", ")", "else", ":", "# No type bound exists at this MATCH step.", "return", "None" ]
Return the GraphQL type bound at the given step, or None if no bound is given.
[ "Return", "the", "GraphQL", "type", "bound", "at", "the", "given", "step", "or", "None", "if", "no", "bound", "is", "given", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/workarounds/orientdb_query_execution.py#L188-L205
233,115
kensho-technologies/graphql-compiler
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
_assert_type_bounds_are_not_conflicting
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
                                            location, match_query):
    """Ensure that the two bounds either are an exact match, or one of them is None."""
    if all((current_type_bound is not None,
            previous_type_bound is not None,
            current_type_bound != previous_type_bound)):
        raise AssertionError(
            u'Conflicting type bounds calculated at location {}: {} vs {} '
            u'for query {}'.format(location, previous_type_bound, current_type_bound,
                                   match_query))
python
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
                                            location, match_query):
    """Ensure that the two bounds either are an exact match, or one of them is None."""
    if all((current_type_bound is not None,
            previous_type_bound is not None,
            current_type_bound != previous_type_bound)):
        raise AssertionError(
            u'Conflicting type bounds calculated at location {}: {} vs {} '
            u'for query {}'.format(location, previous_type_bound, current_type_bound,
                                   match_query))
[ "def", "_assert_type_bounds_are_not_conflicting", "(", "current_type_bound", ",", "previous_type_bound", ",", "location", ",", "match_query", ")", ":", "if", "all", "(", "(", "current_type_bound", "is", "not", "None", ",", "previous_type_bound", "is", "not", "None", ",", "current_type_bound", "!=", "previous_type_bound", ")", ")", ":", "raise", "AssertionError", "(", "u'Conflicting type bounds calculated at location {}: {} vs {} '", "u'for query {}'", ".", "format", "(", "location", ",", "previous_type_bound", ",", "current_type_bound", ",", "match_query", ")", ")" ]
Ensure that the two bounds either are an exact match, or one of them is None.
[ "Ensure", "that", "the", "two", "bounds", "either", "are", "an", "exact", "match", "or", "one", "of", "them", "is", "None", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/workarounds/orientdb_query_execution.py#L208-L216
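The all((...)) test above is equivalent to this plain predicate; a standalone restatement with hypothetical type names:

def bounds_conflict(current_type_bound, previous_type_bound):
    """Return True only when both bounds are set and disagree."""
    return (current_type_bound is not None and
            previous_type_bound is not None and
            current_type_bound != previous_type_bound)

assert not bounds_conflict('Person', None)      # one bound unset: compatible
assert not bounds_conflict('Person', 'Person')  # exact match: compatible
assert bounds_conflict('Person', 'Animal')      # both set and different: conflict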
233,116
kensho-technologies/graphql-compiler
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
_expose_only_preferred_locations
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
                                     preferred_locations, eligible_locations):
    """Return a MATCH query where only preferred locations are valid as query start locations."""
    preferred_location_types = dict()
    eligible_location_types = dict()

    new_match_traversals = []
    for current_traversal in match_query.match_traversals:
        new_traversal = []
        for match_step in current_traversal:
            new_step = match_step
            current_step_location = match_step.as_block.location

            if current_step_location in preferred_locations:
                # This location is preferred. We have to make sure that at least one occurrence
                # of this location in the MATCH query has an associated "class:" clause,
                # which would be generated by a type bound at the corresponding MATCH step.
                current_type_bound = _calculate_type_bound_at_step(match_step)
                previous_type_bound = preferred_location_types.get(current_step_location, None)

                if previous_type_bound is not None:
                    # The location is already valid. If so, make sure that this step either does
                    # not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
                    # or has type bounds that match the previously-decided type bound.
                    _assert_type_bounds_are_not_conflicting(
                        current_type_bound, previous_type_bound,
                        current_step_location, match_query)
                else:
                    # The location is not yet known to be valid. If it does not have
                    # a type bound in this MATCH step, add a type coercion to the type
                    # registered in "location_types".
                    if current_type_bound is None:
                        current_type_bound = location_types[current_step_location].name
                        new_step = match_step._replace(
                            coerce_type_block=CoerceType({current_type_bound}))

                    preferred_location_types[current_step_location] = current_type_bound
            elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure
                # none of the MATCH steps with this location have type bounds, and therefore
                # will not produce a corresponding "class:" clause in the resulting MATCH query.
                current_type_bound = _calculate_type_bound_at_step(match_step)
                previous_type_bound = eligible_location_types.get(current_step_location, None)

                if current_type_bound is not None:
                    # There is a type bound here that we need to neutralize.
                    _assert_type_bounds_are_not_conflicting(
                        current_type_bound, previous_type_bound,
                        current_step_location, match_query)

                    # Record the deduced type bound, so that if we encounter this location again,
                    # we ensure that we again infer the same type bound.
                    eligible_location_types[current_step_location] = current_type_bound

                    if (current_step_location not in coerced_locations or
                            previous_type_bound is not None):
                        # The type bound here is already implied by the GraphQL query structure,
                        # or has already been applied at a previous occurrence of this location.
                        # We can simply delete the QueryRoot / CoerceType blocks that impart it.
                        if isinstance(match_step.root_block, QueryRoot):
                            new_root_block = None
                        else:
                            new_root_block = match_step.root_block

                        new_step = match_step._replace(
                            root_block=new_root_block, coerce_type_block=None)
                    else:
                        # The type bound here is not already implied by the GraphQL query structure.
                        # This should only be possible via a CoerceType block. Lower this CoerceType
                        # block into a Filter with INSTANCEOF to ensure the resulting query has the
                        # same semantics, while making the location invalid as a query start point.
                        if (isinstance(match_step.root_block, QueryRoot) or
                                match_step.coerce_type_block is None):
                            raise AssertionError(u'Unexpected MATCH step applying a type bound not '
                                                 u'already implied by the GraphQL query structure: '
                                                 u'{} {}'.format(match_step, match_query))

                        new_where_block = convert_coerce_type_and_add_to_where_block(
                            match_step.coerce_type_block, match_step.where_block)
                        new_step = match_step._replace(
                            coerce_type_block=None, where_block=new_where_block)
                else:
                    # There is no type bound that OrientDB can find defined at this location.
                    # No action is necessary.
                    pass
            else:
                # This location is neither preferred nor eligible.
                # No action is necessary at this location.
                pass

            new_traversal.append(new_step)
        new_match_traversals.append(new_traversal)

    return match_query._replace(match_traversals=new_match_traversals)
python
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
                                     preferred_locations, eligible_locations):
    """Return a MATCH query where only preferred locations are valid as query start locations."""
    preferred_location_types = dict()
    eligible_location_types = dict()

    new_match_traversals = []
    for current_traversal in match_query.match_traversals:
        new_traversal = []
        for match_step in current_traversal:
            new_step = match_step
            current_step_location = match_step.as_block.location

            if current_step_location in preferred_locations:
                # This location is preferred. We have to make sure that at least one occurrence
                # of this location in the MATCH query has an associated "class:" clause,
                # which would be generated by a type bound at the corresponding MATCH step.
                current_type_bound = _calculate_type_bound_at_step(match_step)
                previous_type_bound = preferred_location_types.get(current_step_location, None)

                if previous_type_bound is not None:
                    # The location is already valid. If so, make sure that this step either does
                    # not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
                    # or has type bounds that match the previously-decided type bound.
                    _assert_type_bounds_are_not_conflicting(
                        current_type_bound, previous_type_bound,
                        current_step_location, match_query)
                else:
                    # The location is not yet known to be valid. If it does not have
                    # a type bound in this MATCH step, add a type coercion to the type
                    # registered in "location_types".
                    if current_type_bound is None:
                        current_type_bound = location_types[current_step_location].name
                        new_step = match_step._replace(
                            coerce_type_block=CoerceType({current_type_bound}))

                    preferred_location_types[current_step_location] = current_type_bound
            elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure
                # none of the MATCH steps with this location have type bounds, and therefore
                # will not produce a corresponding "class:" clause in the resulting MATCH query.
                current_type_bound = _calculate_type_bound_at_step(match_step)
                previous_type_bound = eligible_location_types.get(current_step_location, None)

                if current_type_bound is not None:
                    # There is a type bound here that we need to neutralize.
                    _assert_type_bounds_are_not_conflicting(
                        current_type_bound, previous_type_bound,
                        current_step_location, match_query)

                    # Record the deduced type bound, so that if we encounter this location again,
                    # we ensure that we again infer the same type bound.
                    eligible_location_types[current_step_location] = current_type_bound

                    if (current_step_location not in coerced_locations or
                            previous_type_bound is not None):
                        # The type bound here is already implied by the GraphQL query structure,
                        # or has already been applied at a previous occurrence of this location.
                        # We can simply delete the QueryRoot / CoerceType blocks that impart it.
                        if isinstance(match_step.root_block, QueryRoot):
                            new_root_block = None
                        else:
                            new_root_block = match_step.root_block

                        new_step = match_step._replace(
                            root_block=new_root_block, coerce_type_block=None)
                    else:
                        # The type bound here is not already implied by the GraphQL query structure.
                        # This should only be possible via a CoerceType block. Lower this CoerceType
                        # block into a Filter with INSTANCEOF to ensure the resulting query has the
                        # same semantics, while making the location invalid as a query start point.
                        if (isinstance(match_step.root_block, QueryRoot) or
                                match_step.coerce_type_block is None):
                            raise AssertionError(u'Unexpected MATCH step applying a type bound not '
                                                 u'already implied by the GraphQL query structure: '
                                                 u'{} {}'.format(match_step, match_query))

                        new_where_block = convert_coerce_type_and_add_to_where_block(
                            match_step.coerce_type_block, match_step.where_block)
                        new_step = match_step._replace(
                            coerce_type_block=None, where_block=new_where_block)
                else:
                    # There is no type bound that OrientDB can find defined at this location.
                    # No action is necessary.
                    pass
            else:
                # This location is neither preferred nor eligible.
                # No action is necessary at this location.
                pass

            new_traversal.append(new_step)
        new_match_traversals.append(new_traversal)

    return match_query._replace(match_traversals=new_match_traversals)
[ "def", "_expose_only_preferred_locations", "(", "match_query", ",", "location_types", ",", "coerced_locations", ",", "preferred_locations", ",", "eligible_locations", ")", ":", "preferred_location_types", "=", "dict", "(", ")", "eligible_location_types", "=", "dict", "(", ")", "new_match_traversals", "=", "[", "]", "for", "current_traversal", "in", "match_query", ".", "match_traversals", ":", "new_traversal", "=", "[", "]", "for", "match_step", "in", "current_traversal", ":", "new_step", "=", "match_step", "current_step_location", "=", "match_step", ".", "as_block", ".", "location", "if", "current_step_location", "in", "preferred_locations", ":", "# This location is preferred. We have to make sure that at least one occurrence", "# of this location in the MATCH query has an associated \"class:\" clause,", "# which would be generated by a type bound at the corresponding MATCH step.", "current_type_bound", "=", "_calculate_type_bound_at_step", "(", "match_step", ")", "previous_type_bound", "=", "preferred_location_types", ".", "get", "(", "current_step_location", ",", "None", ")", "if", "previous_type_bound", "is", "not", "None", ":", "# The location is already valid. If so, make sure that this step either does", "# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),", "# or has type bounds that match the previously-decided type bound.", "_assert_type_bounds_are_not_conflicting", "(", "current_type_bound", ",", "previous_type_bound", ",", "current_step_location", ",", "match_query", ")", "else", ":", "# The location is not yet known to be valid. If it does not have", "# a type bound in this MATCH step, add a type coercion to the type", "# registered in \"location_types\".", "if", "current_type_bound", "is", "None", ":", "current_type_bound", "=", "location_types", "[", "current_step_location", "]", ".", "name", "new_step", "=", "match_step", ".", "_replace", "(", "coerce_type_block", "=", "CoerceType", "(", "{", "current_type_bound", "}", ")", ")", "preferred_location_types", "[", "current_step_location", "]", "=", "current_type_bound", "elif", "current_step_location", "in", "eligible_locations", ":", "# This location is eligible, but not preferred. We have to make sure", "# none of the MATCH steps with this location have type bounds, and therefore", "# will not produce a corresponding \"class:\" clause in the resulting MATCH query.", "current_type_bound", "=", "_calculate_type_bound_at_step", "(", "match_step", ")", "previous_type_bound", "=", "eligible_location_types", ".", "get", "(", "current_step_location", ",", "None", ")", "if", "current_type_bound", "is", "not", "None", ":", "# There is a type bound here that we need to neutralize.", "_assert_type_bounds_are_not_conflicting", "(", "current_type_bound", ",", "previous_type_bound", ",", "current_step_location", ",", "match_query", ")", "# Record the deduced type bound, so that if we encounter this location again,", "# we ensure that we again infer the same type bound.", "eligible_location_types", "[", "current_step_location", "]", "=", "current_type_bound", "if", "(", "current_step_location", "not", "in", "coerced_locations", "or", "previous_type_bound", "is", "not", "None", ")", ":", "# The type bound here is already implied by the GraphQL query structure,", "# or has already been applied at a previous occurrence of this location.", "# We can simply delete the QueryRoot / CoerceType blocks that impart it.", "if", "isinstance", "(", "match_step", ".", "root_block", ",", "QueryRoot", ")", ":", "new_root_block", "=", "None", "else", ":", "new_root_block", "=", "match_step", ".", "root_block", "new_step", "=", "match_step", ".", "_replace", "(", "root_block", "=", "new_root_block", ",", "coerce_type_block", "=", "None", ")", "else", ":", "# The type bound here is not already implied by the GraphQL query structure.", "# This should only be possible via a CoerceType block. Lower this CoerceType", "# block into a Filter with INSTANCEOF to ensure the resulting query has the", "# same semantics, while making the location invalid as a query start point.", "if", "(", "isinstance", "(", "match_step", ".", "root_block", ",", "QueryRoot", ")", "or", "match_step", ".", "coerce_type_block", "is", "None", ")", ":", "raise", "AssertionError", "(", "u'Unexpected MATCH step applying a type bound not '", "u'already implied by the GraphQL query structure: '", "u'{} {}'", ".", "format", "(", "match_step", ",", "match_query", ")", ")", "new_where_block", "=", "convert_coerce_type_and_add_to_where_block", "(", "match_step", ".", "coerce_type_block", ",", "match_step", ".", "where_block", ")", "new_step", "=", "match_step", ".", "_replace", "(", "coerce_type_block", "=", "None", ",", "where_block", "=", "new_where_block", ")", "else", ":", "# There is no type bound that OrientDB can find defined at this location.", "# No action is necessary.", "pass", "else", ":", "# This location is neither preferred nor eligible.", "# No action is necessary at this location.", "pass", "new_traversal", ".", "append", "(", "new_step", ")", "new_match_traversals", ".", "append", "(", "new_traversal", ")", "return", "match_query", ".", "_replace", "(", "match_traversals", "=", "new_match_traversals", ")" ]
Return a MATCH query where only preferred locations are valid as query start locations.
[ "Return", "a", "MATCH", "query", "where", "only", "preferred", "locations", "are", "valid", "as", "query", "start", "locations", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/workarounds/orientdb_query_execution.py#L219-L308
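As a purely illustrative aside: the rewrite described in the comments above moves the class bound out of the "class:" clause and into the WHERE clause as an INSTANCEOF check, which OrientDB's planner cannot use for start-point selection. A hedged sketch of the resulting clause shape only; the exact string built by convert_coerce_type_and_add_to_where_block is not reproduced here.

coerced_class = u'Animal'
# The class check survives as a filter, so the query keeps its semantics,
# but the location no longer advertises a "class:" clause to the planner.
where_clause = u"(@this INSTANCEOF '{}')".format(coerced_class)
print(where_clause)  # (@this INSTANCEOF 'Animal')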
233,117
kensho-technologies/graphql-compiler
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
_expose_all_eligible_locations
def _expose_all_eligible_locations(match_query, location_types, eligible_locations): """Return a MATCH query where all eligible locations are valid as query start locations.""" eligible_location_types = dict() new_match_traversals = [] for current_traversal in match_query.match_traversals: new_traversal = [] for match_step in current_traversal: new_step = match_step current_step_location = match_step.as_block.location if current_step_location in eligible_locations: # This location is eligible. We need to make sure it has an associated type bound, # so that it produces a "class:" clause that will make it a valid query start # location. It either already has such a type bound, or we can use the type # implied by the GraphQL query structure to add one. current_type_bound = _calculate_type_bound_at_step(match_step) previous_type_bound = eligible_location_types.get(current_step_location, None) if current_type_bound is None: current_type_bound = location_types[current_step_location].name new_coerce_type_block = CoerceType({current_type_bound}) new_step = match_step._replace(coerce_type_block=new_coerce_type_block) else: # There is a type bound here. We simply ensure that the bound is not conflicting # with any other type bound at a different MATCH step with the same location. _assert_type_bounds_are_not_conflicting( current_type_bound, previous_type_bound, current_step_location, match_query) # Record the deduced type bound, so that if we encounter this location again, # we ensure that we again infer the same type bound. eligible_location_types[current_step_location] = current_type_bound else: # This function may only be called if there are no preferred locations. Since this # location cannot be preferred, and is not eligible, it must be ineligible. # No action is necessary in this case. pass new_traversal.append(new_step) new_match_traversals.append(new_traversal) return match_query._replace(match_traversals=new_match_traversals)
python
def _expose_all_eligible_locations(match_query, location_types, eligible_locations): """Return a MATCH query where all eligible locations are valid as query start locations.""" eligible_location_types = dict() new_match_traversals = [] for current_traversal in match_query.match_traversals: new_traversal = [] for match_step in current_traversal: new_step = match_step current_step_location = match_step.as_block.location if current_step_location in eligible_locations: # This location is eligible. We need to make sure it has an associated type bound, # so that it produces a "class:" clause that will make it a valid query start # location. It either already has such a type bound, or we can use the type # implied by the GraphQL query structure to add one. current_type_bound = _calculate_type_bound_at_step(match_step) previous_type_bound = eligible_location_types.get(current_step_location, None) if current_type_bound is None: current_type_bound = location_types[current_step_location].name new_coerce_type_block = CoerceType({current_type_bound}) new_step = match_step._replace(coerce_type_block=new_coerce_type_block) else: # There is a type bound here. We simply ensure that the bound is not conflicting # with any other type bound at a different MATCH step with the same location. _assert_type_bounds_are_not_conflicting( current_type_bound, previous_type_bound, current_step_location, match_query) # Record the deduced type bound, so that if we encounter this location again, # we ensure that we again infer the same type bound. eligible_location_types[current_step_location] = current_type_bound else: # This function may only be called if there are no preferred locations. Since this # location cannot be preferred, and is not eligible, it must be ineligible. # No action is necessary in this case. pass new_traversal.append(new_step) new_match_traversals.append(new_traversal) return match_query._replace(match_traversals=new_match_traversals)
[ "def", "_expose_all_eligible_locations", "(", "match_query", ",", "location_types", ",", "eligible_locations", ")", ":", "eligible_location_types", "=", "dict", "(", ")", "new_match_traversals", "=", "[", "]", "for", "current_traversal", "in", "match_query", ".", "match_traversals", ":", "new_traversal", "=", "[", "]", "for", "match_step", "in", "current_traversal", ":", "new_step", "=", "match_step", "current_step_location", "=", "match_step", ".", "as_block", ".", "location", "if", "current_step_location", "in", "eligible_locations", ":", "# This location is eligible. We need to make sure it has an associated type bound,", "# so that it produces a \"class:\" clause that will make it a valid query start", "# location. It either already has such a type bound, or we can use the type", "# implied by the GraphQL query structure to add one.", "current_type_bound", "=", "_calculate_type_bound_at_step", "(", "match_step", ")", "previous_type_bound", "=", "eligible_location_types", ".", "get", "(", "current_step_location", ",", "None", ")", "if", "current_type_bound", "is", "None", ":", "current_type_bound", "=", "location_types", "[", "current_step_location", "]", ".", "name", "new_coerce_type_block", "=", "CoerceType", "(", "{", "current_type_bound", "}", ")", "new_step", "=", "match_step", ".", "_replace", "(", "coerce_type_block", "=", "new_coerce_type_block", ")", "else", ":", "# There is a type bound here. We simply ensure that the bound is not conflicting", "# with any other type bound at a different MATCH step with the same location.", "_assert_type_bounds_are_not_conflicting", "(", "current_type_bound", ",", "previous_type_bound", ",", "current_step_location", ",", "match_query", ")", "# Record the deduced type bound, so that if we encounter this location again,", "# we ensure that we again infer the same type bound.", "eligible_location_types", "[", "current_step_location", "]", "=", "current_type_bound", "else", ":", "# This function may only be called if there are no preferred locations. Since this", "# location cannot be preferred, and is not eligible, it must be ineligible.", "# No action is necessary in this case.", "pass", "new_traversal", ".", "append", "(", "new_step", ")", "new_match_traversals", ".", "append", "(", "new_traversal", ")", "return", "match_query", ".", "_replace", "(", "match_traversals", "=", "new_match_traversals", ")" ]
Return a MATCH query where all eligible locations are valid as query start locations.
[ "Return", "a", "MATCH", "query", "where", "all", "eligible", "locations", "are", "valid", "as", "query", "start", "locations", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/workarounds/orientdb_query_execution.py#L311-L350
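Both expose functions above keep per-location bookkeeping so that the same location never receives two different type bounds. A standalone sketch of that invariant, with stand-in names rather than the compiler's internals:

eligible_location_types = {}

def record_type_bound(location, type_bound):
    # The first bound recorded for a location wins; a later, different bound
    # for the same location is treated as a conflict, mirroring
    # _assert_type_bounds_are_not_conflicting in spirit.
    previous_type_bound = eligible_location_types.get(location, None)
    if previous_type_bound is not None and previous_type_bound != type_bound:
        raise AssertionError(u'Conflicting type bounds at {}: {} vs {}'.format(
            location, previous_type_bound, type_bound))
    eligible_location_types[location] = type_bound

record_type_bound('Animal___1', u'Animal')
record_type_bound('Animal___1', u'Animal')  # same bound again -> no conflict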
233,118
kensho-technologies/graphql-compiler
graphql_compiler/compiler/workarounds/orientdb_query_execution.py
expose_ideal_query_execution_start_points
def expose_ideal_query_execution_start_points(compound_match_query, location_types, coerced_locations): """Ensure that OrientDB only considers desirable query start points in query planning.""" new_queries = [] for match_query in compound_match_query.match_queries: location_classification = _classify_query_locations(match_query) preferred_locations, eligible_locations, _ = location_classification if preferred_locations: # Convert all eligible locations into non-eligible ones, by removing # their "class:" clause. The "class:" clause is provided either by having # a QueryRoot block or a CoerceType block in the MatchStep corresponding # to the location. We remove it by converting the class check into # an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away. new_query = _expose_only_preferred_locations( match_query, location_types, coerced_locations, preferred_locations, eligible_locations) elif eligible_locations: # Make sure that all eligible locations have a "class:" clause by adding # a CoerceType block that is a no-op as guaranteed by the schema. This merely # ensures that OrientDB is able to use each of these locations as a query start point, # and will choose the one whose class is of lowest cardinality. new_query = _expose_all_eligible_locations( match_query, location_types, eligible_locations) else: raise AssertionError(u'This query has no preferred or eligible query start locations. ' u'This is almost certainly a bug: {}'.format(match_query)) new_queries.append(new_query) return compound_match_query._replace(match_queries=new_queries)
python
def expose_ideal_query_execution_start_points(compound_match_query, location_types, coerced_locations): """Ensure that OrientDB only considers desirable query start points in query planning.""" new_queries = [] for match_query in compound_match_query.match_queries: location_classification = _classify_query_locations(match_query) preferred_locations, eligible_locations, _ = location_classification if preferred_locations: # Convert all eligible locations into non-eligible ones, by removing # their "class:" clause. The "class:" clause is provided either by having # a QueryRoot block or a CoerceType block in the MatchStep corresponding # to the location. We remove it by converting the class check into # an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away. new_query = _expose_only_preferred_locations( match_query, location_types, coerced_locations, preferred_locations, eligible_locations) elif eligible_locations: # Make sure that all eligible locations have a "class:" clause by adding # a CoerceType block that is a no-op as guaranteed by the schema. This merely # ensures that OrientDB is able to use each of these locations as a query start point, # and will choose the one whose class is of lowest cardinality. new_query = _expose_all_eligible_locations( match_query, location_types, eligible_locations) else: raise AssertionError(u'This query has no preferred or eligible query start locations. ' u'This is almost certainly a bug: {}'.format(match_query)) new_queries.append(new_query) return compound_match_query._replace(match_queries=new_queries)
[ "def", "expose_ideal_query_execution_start_points", "(", "compound_match_query", ",", "location_types", ",", "coerced_locations", ")", ":", "new_queries", "=", "[", "]", "for", "match_query", "in", "compound_match_query", ".", "match_queries", ":", "location_classification", "=", "_classify_query_locations", "(", "match_query", ")", "preferred_locations", ",", "eligible_locations", ",", "_", "=", "location_classification", "if", "preferred_locations", ":", "# Convert all eligible locations into non-eligible ones, by removing", "# their \"class:\" clause. The \"class:\" clause is provided either by having", "# a QueryRoot block or a CoerceType block in the MatchStep corresponding", "# to the location. We remove it by converting the class check into", "# an \"INSTANCEOF\" Filter block, which OrientDB is unable to optimize away.", "new_query", "=", "_expose_only_preferred_locations", "(", "match_query", ",", "location_types", ",", "coerced_locations", ",", "preferred_locations", ",", "eligible_locations", ")", "elif", "eligible_locations", ":", "# Make sure that all eligible locations have a \"class:\" clause by adding", "# a CoerceType block that is a no-op as guaranteed by the schema. This merely", "# ensures that OrientDB is able to use each of these locations as a query start point,", "# and will choose the one whose class is of lowest cardinality.", "new_query", "=", "_expose_all_eligible_locations", "(", "match_query", ",", "location_types", ",", "eligible_locations", ")", "else", ":", "raise", "AssertionError", "(", "u'This query has no preferred or eligible query start locations. '", "u'This is almost certainly a bug: {}'", ".", "format", "(", "match_query", ")", ")", "new_queries", ".", "append", "(", "new_query", ")", "return", "compound_match_query", ".", "_replace", "(", "match_queries", "=", "new_queries", ")" ]
Ensure that OrientDB only considers desirable query start points in query planning.
[ "Ensure", "that", "OrientDB", "only", "considers", "desirable", "query", "start", "points", "in", "query", "planning", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/workarounds/orientdb_query_execution.py#L353-L384
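All three workaround functions rewrite the query immutably via namedtuple _replace rather than mutating MATCH steps in place. A minimal sketch of that pattern, using stand-in namedtuples, not the compiler's real MatchStep and CoerceType classes:

from collections import namedtuple

MatchStep = namedtuple('MatchStep', ('root_block', 'coerce_type_block', 'as_block'))
CoerceType = namedtuple('CoerceType', ('target_class',))

step = MatchStep(root_block=None, coerce_type_block=None, as_block='Animal___1')

# _replace returns a new tuple; the original step (and thus the original
# MATCH query structure) is never mutated in place.
new_step = step._replace(coerce_type_block=CoerceType({'Animal'}))
assert step.coerce_type_block is None
assert new_step.coerce_type_block.target_class == {'Animal'}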
233,119
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/between_lowering.py
_expression_list_to_conjunction
def _expression_list_to_conjunction(expression_list): """Return an Expression that is the `&&` of all the expressions in the given list.""" if not isinstance(expression_list, list): raise AssertionError(u'Expected list. Received {}: ' u'{}'.format(type(expression_list).__name__, expression_list)) if len(expression_list) == 0: raise AssertionError(u'Received empty expression_list ' u'(function should never be called with empty list): ' u'{}'.format(expression_list)) elif len(expression_list) == 1: return expression_list[0] else: remaining_conjunction = _expression_list_to_conjunction(expression_list[1:]) return BinaryComposition(u'&&', expression_list[0], remaining_conjunction)
python
def _expression_list_to_conjunction(expression_list): """Return an Expression that is the `&&` of all the expressions in the given list.""" if not isinstance(expression_list, list): raise AssertionError(u'Expected list. Received {}: ' u'{}'.format(type(expression_list).__name__, expression_list)) if len(expression_list) == 0: raise AssertionError(u'Received empty expression_list ' u'(function should never be called with empty list): ' u'{}'.format(expression_list)) elif len(expression_list) == 1: return expression_list[0] else: remaining_conjunction = _expression_list_to_conjunction(expression_list[1:]) return BinaryComposition(u'&&', expression_list[0], remaining_conjunction)
[ "def", "_expression_list_to_conjunction", "(", "expression_list", ")", ":", "if", "not", "isinstance", "(", "expression_list", ",", "list", ")", ":", "raise", "AssertionError", "(", "u'Expected list. Received {}: '", "u'{}'", ".", "format", "(", "type", "(", "expression_list", ")", ".", "__name__", ",", "expression_list", ")", ")", "if", "len", "(", "expression_list", ")", "==", "0", ":", "raise", "AssertionError", "(", "u'Received empty expression_list '", "u'(function should never be called with empty list): '", "u'{}'", ".", "format", "(", "expression_list", ")", ")", "elif", "len", "(", "expression_list", ")", "==", "1", ":", "return", "expression_list", "[", "0", "]", "else", ":", "remaining_conjunction", "=", "_expression_list_to_conjunction", "(", "expression_list", "[", "1", ":", "]", ")", "return", "BinaryComposition", "(", "u'&&'", ",", "expression_list", "[", "0", "]", ",", "remaining_conjunction", ")" ]
Return an Expression that is the `&&` of all the expressions in the given list.
[ "Return", "an", "Expression", "that", "is", "the", "&&", "of", "all", "the", "expressions", "in", "the", "given", "list", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/between_lowering.py#L9-L22
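A self-contained illustration of how the recursion folds a list into a right-nested `&&` tree; BinaryComposition here is a stand-in namedtuple, not the compiler's Expression class:

from collections import namedtuple

BinaryComposition = namedtuple('BinaryComposition', ('operator', 'left', 'right'))

def expression_list_to_conjunction(expression_list):
    # Same recursive shape as the library function, minus its type checks.
    if len(expression_list) == 1:
        return expression_list[0]
    return BinaryComposition(
        u'&&', expression_list[0],
        expression_list_to_conjunction(expression_list[1:]))

result = expression_list_to_conjunction(['a', 'b', 'c'])
# Produces (a && (b && c)):
assert result == BinaryComposition(u'&&', 'a', BinaryComposition(u'&&', 'b', 'c'))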
233,120
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/between_lowering.py
_extract_conjuction_elements_from_expression
def _extract_conjuction_elements_from_expression(expression): """Return a generator for expressions that are connected by `&&`s in the given expression.""" if isinstance(expression, BinaryComposition) and expression.operator == u'&&': for element in _extract_conjuction_elements_from_expression(expression.left): yield element for element in _extract_conjuction_elements_from_expression(expression.right): yield element else: yield expression
python
def _extract_conjuction_elements_from_expression(expression): """Return a generator for expressions that are connected by `&&`s in the given expression.""" if isinstance(expression, BinaryComposition) and expression.operator == u'&&': for element in _extract_conjuction_elements_from_expression(expression.left): yield element for element in _extract_conjuction_elements_from_expression(expression.right): yield element else: yield expression
[ "def", "_extract_conjuction_elements_from_expression", "(", "expression", ")", ":", "if", "isinstance", "(", "expression", ",", "BinaryComposition", ")", "and", "expression", ".", "operator", "==", "u'&&'", ":", "for", "element", "in", "_extract_conjuction_elements_from_expression", "(", "expression", ".", "left", ")", ":", "yield", "element", "for", "element", "in", "_extract_conjuction_elements_from_expression", "(", "expression", ".", "right", ")", ":", "yield", "element", "else", ":", "yield", "expression" ]
Return a generator for expressions that are connected by `&&`s in the given expression.
[ "Return", "a", "generator", "for", "expressions", "that", "are", "connected", "by", "&&", "s", "in", "the", "given", "expression", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/between_lowering.py#L25-L33
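The inverse operation: flattening a `&&` tree back into its leaf expressions. A hedged, standalone sketch using the same stand-in namedtuple as above:

from collections import namedtuple

BinaryComposition = namedtuple('BinaryComposition', ('operator', 'left', 'right'))

def extract_conjunction_elements(expression):
    # Recurse into both sides of every `&&`; anything else is a leaf.
    if isinstance(expression, BinaryComposition) and expression.operator == u'&&':
        for side in (expression.left, expression.right):
            for element in extract_conjunction_elements(side):
                yield element
    else:
        yield expression

tree = BinaryComposition(u'&&', 'a', BinaryComposition(u'&&', 'b', 'c'))
assert list(extract_conjunction_elements(tree)) == ['a', 'b', 'c']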
233,121
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/between_lowering.py
_construct_field_operator_expression_dict
def _construct_field_operator_expression_dict(expression_list): """Construct a mapping from local fields to specified operators, and corresponding expressions. Args: expression_list: list of expressions to analyze Returns: local_field_to_expressions: dict mapping local field names to "operator -> list of BinaryComposition" dictionaries, for each BinaryComposition operator involving the LocalField remaining_expression_list: list of remaining expressions that were *not* BinaryCompositions on a LocalField using any of the between operators """ between_operators = (u'<=', u'>=') inverse_operator = {u'>=': u'<=', u'<=': u'>='} local_field_to_expressions = {} remaining_expression_list = deque([]) for expression in expression_list: if all(( isinstance(expression, BinaryComposition), expression.operator in between_operators, isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField) )): if isinstance(expression.right, LocalField): new_operator = inverse_operator[expression.operator] new_expression = BinaryComposition(new_operator, expression.right, expression.left) else: new_expression = expression field_name = new_expression.left.field_name expressions_dict = local_field_to_expressions.setdefault(field_name, {}) expressions_dict.setdefault(new_expression.operator, []).append(new_expression) else: remaining_expression_list.append(expression) return local_field_to_expressions, remaining_expression_list
python
def _construct_field_operator_expression_dict(expression_list): """Construct a mapping from local fields to specified operators, and corresponding expressions. Args: expression_list: list of expressions to analyze Returns: local_field_to_expressions: dict mapping local field names to "operator -> list of BinaryComposition" dictionaries, for each BinaryComposition operator involving the LocalField remaining_expression_list: list of remaining expressions that were *not* BinaryCompositions on a LocalField using any of the between operators """ between_operators = (u'<=', u'>=') inverse_operator = {u'>=': u'<=', u'<=': u'>='} local_field_to_expressions = {} remaining_expression_list = deque([]) for expression in expression_list: if all(( isinstance(expression, BinaryComposition), expression.operator in between_operators, isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField) )): if isinstance(expression.right, LocalField): new_operator = inverse_operator[expression.operator] new_expression = BinaryComposition(new_operator, expression.right, expression.left) else: new_expression = expression field_name = new_expression.left.field_name expressions_dict = local_field_to_expressions.setdefault(field_name, {}) expressions_dict.setdefault(new_expression.operator, []).append(new_expression) else: remaining_expression_list.append(expression) return local_field_to_expressions, remaining_expression_list
[ "def", "_construct_field_operator_expression_dict", "(", "expression_list", ")", ":", "between_operators", "=", "(", "u'<='", ",", "u'>='", ")", "inverse_operator", "=", "{", "u'>='", ":", "u'<='", ",", "u'<='", ":", "u'>='", "}", "local_field_to_expressions", "=", "{", "}", "remaining_expression_list", "=", "deque", "(", "[", "]", ")", "for", "expression", "in", "expression_list", ":", "if", "all", "(", "(", "isinstance", "(", "expression", ",", "BinaryComposition", ")", ",", "expression", ".", "operator", "in", "between_operators", ",", "isinstance", "(", "expression", ".", "left", ",", "LocalField", ")", "or", "isinstance", "(", "expression", ".", "right", ",", "LocalField", ")", ")", ")", ":", "if", "isinstance", "(", "expression", ".", "right", ",", "LocalField", ")", ":", "new_operator", "=", "inverse_operator", "[", "expression", ".", "operator", "]", "new_expression", "=", "BinaryComposition", "(", "new_operator", ",", "expression", ".", "right", ",", "expression", ".", "left", ")", "else", ":", "new_expression", "=", "expression", "field_name", "=", "new_expression", ".", "left", ".", "field_name", "expressions_dict", "=", "local_field_to_expressions", ".", "setdefault", "(", "field_name", ",", "{", "}", ")", "expressions_dict", ".", "setdefault", "(", "new_expression", ".", "operator", ",", "[", "]", ")", ".", "append", "(", "new_expression", ")", "else", ":", "remaining_expression_list", ".", "append", "(", "expression", ")", "return", "local_field_to_expressions", ",", "remaining_expression_list" ]
Construct a mapping from local fields to specified operators, and corresponding expressions. Args: expression_list: list of expressions to analyze Returns: local_field_to_expressions: dict mapping local field names to "operator -> list of BinaryComposition" dictionaries, for each BinaryComposition operator involving the LocalField remaining_expression_list: list of remaining expressions that were *not* BinaryCompositions on a LocalField using any of the between operators
[ "Construct", "a", "mapping", "from", "local", "fields", "to", "specified", "operators", "and", "corresponding", "expressions", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/between_lowering.py#L36-L70
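The key normalization step: when the LocalField sits on the right of a comparison, the operator is inverted so the field always ends up on the left before grouping. A standalone sketch with stand-in types:

from collections import namedtuple

BinaryComposition = namedtuple('BinaryComposition', ('operator', 'left', 'right'))
LocalField = namedtuple('LocalField', ('field_name',))

inverse_operator = {u'>=': u'<=', u'<=': u'>='}

# "5 <= net_worth" has the LocalField on the right, so it is flipped
# into "net_worth >= 5" before being grouped by field name and operator.
expression = BinaryComposition(u'<=', 5, LocalField('net_worth'))
normalized = BinaryComposition(
    inverse_operator[expression.operator], expression.right, expression.left)

local_field_to_expressions = {}
expressions_dict = local_field_to_expressions.setdefault(
    normalized.left.field_name, {})
expressions_dict.setdefault(normalized.operator, []).append(normalized)
assert local_field_to_expressions['net_worth'][u'>='][0].right == 5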
233,122
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/between_lowering.py
_lower_expressions_to_between
def _lower_expressions_to_between(base_expression): """Return a new expression, with any eligible comparisons lowered to `between` clauses.""" expression_list = list(_extract_conjuction_elements_from_expression(base_expression)) if len(expression_list) == 0: raise AssertionError(u'Received empty expression_list {} from base_expression: ' u'{}'.format(expression_list, base_expression)) elif len(expression_list) == 1: return base_expression else: between_operators = (u'<=', u'>=') local_field_to_expressions, new_expression_list = _construct_field_operator_expression_dict( expression_list) lowering_occurred = False for field_name in local_field_to_expressions: expressions_dict = local_field_to_expressions[field_name] if all(operator in expressions_dict and len(expressions_dict[operator]) == 1 for operator in between_operators): field = LocalField(field_name) lower_bound = expressions_dict[u'>='][0].right upper_bound = expressions_dict[u'<='][0].right new_expression_list.appendleft(BetweenClause(field, lower_bound, upper_bound)) lowering_occurred = True else: for expression in expressions_dict.values(): new_expression_list.extend(expression) if lowering_occurred: return _expression_list_to_conjunction(list(new_expression_list)) else: return base_expression
python
def _lower_expressions_to_between(base_expression): """Return a new expression, with any eligible comparisons lowered to `between` clauses.""" expression_list = list(_extract_conjuction_elements_from_expression(base_expression)) if len(expression_list) == 0: raise AssertionError(u'Received empty expression_list {} from base_expression: ' u'{}'.format(expression_list, base_expression)) elif len(expression_list) == 1: return base_expression else: between_operators = (u'<=', u'>=') local_field_to_expressions, new_expression_list = _construct_field_operator_expression_dict( expression_list) lowering_occurred = False for field_name in local_field_to_expressions: expressions_dict = local_field_to_expressions[field_name] if all(operator in expressions_dict and len(expressions_dict[operator]) == 1 for operator in between_operators): field = LocalField(field_name) lower_bound = expressions_dict[u'>='][0].right upper_bound = expressions_dict[u'<='][0].right new_expression_list.appendleft(BetweenClause(field, lower_bound, upper_bound)) lowering_occurred = True else: for expression in expressions_dict.values(): new_expression_list.extend(expression) if lowering_occurred: return _expression_list_to_conjunction(list(new_expression_list)) else: return base_expression
[ "def", "_lower_expressions_to_between", "(", "base_expression", ")", ":", "expression_list", "=", "list", "(", "_extract_conjuction_elements_from_expression", "(", "base_expression", ")", ")", "if", "len", "(", "expression_list", ")", "==", "0", ":", "raise", "AssertionError", "(", "u'Received empty expression_list {} from base_expression: '", "u'{}'", ".", "format", "(", "expression_list", ",", "base_expression", ")", ")", "elif", "len", "(", "expression_list", ")", "==", "1", ":", "return", "base_expression", "else", ":", "between_operators", "=", "(", "u'<='", ",", "u'>='", ")", "local_field_to_expressions", ",", "new_expression_list", "=", "_construct_field_operator_expression_dict", "(", "expression_list", ")", "lowering_occurred", "=", "False", "for", "field_name", "in", "local_field_to_expressions", ":", "expressions_dict", "=", "local_field_to_expressions", "[", "field_name", "]", "if", "all", "(", "operator", "in", "expressions_dict", "and", "len", "(", "expressions_dict", "[", "operator", "]", ")", "==", "1", "for", "operator", "in", "between_operators", ")", ":", "field", "=", "LocalField", "(", "field_name", ")", "lower_bound", "=", "expressions_dict", "[", "u'>='", "]", "[", "0", "]", ".", "right", "upper_bound", "=", "expressions_dict", "[", "u'<='", "]", "[", "0", "]", ".", "right", "new_expression_list", ".", "appendleft", "(", "BetweenClause", "(", "field", ",", "lower_bound", ",", "upper_bound", ")", ")", "lowering_occurred", "=", "True", "else", ":", "for", "expression", "in", "expressions_dict", ".", "values", "(", ")", ":", "new_expression_list", ".", "extend", "(", "expression", ")", "if", "lowering_occurred", ":", "return", "_expression_list_to_conjunction", "(", "list", "(", "new_expression_list", ")", ")", "else", ":", "return", "base_expression" ]
Return a new expression, with any eligible comparisons lowered to `between` clauses.
[ "Return", "a", "new", "expression", "with", "any", "eligible", "comparisons", "lowered", "to", "between", "clauses", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/between_lowering.py#L73-L103
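The eligibility rule for the lowering: a field qualifies for a BETWEEN clause only when it has exactly one `>=` and exactly one `<=` comparison; anything else keeps its original comparisons. A self-contained illustration, where BetweenClause is a stand-in namedtuple:

from collections import namedtuple

BetweenClause = namedtuple('BetweenClause', ('field', 'lower_bound', 'upper_bound'))
between_operators = (u'<=', u'>=')

def eligible_for_between(expressions_dict):
    # Exactly one bound of each kind is required for the rewrite.
    return all(operator in expressions_dict and len(expressions_dict[operator]) == 1
               for operator in between_operators)

assert eligible_for_between({u'>=': ['lower'], u'<=': ['upper']})
assert not eligible_for_between({u'<=': ['a', 'b']})  # two upper bounds, no lower

clause = BetweenClause('net_worth', 'lower', 'upper')
print(clause)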
233,123
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/between_lowering.py
lower_comparisons_to_between
def lower_comparisons_to_between(match_query): """Return a new MatchQuery, with all eligible comparison filters lowered to between clauses.""" new_match_traversals = [] for current_match_traversal in match_query.match_traversals: new_traversal = [] for step in current_match_traversal: if step.where_block: expression = step.where_block.predicate new_where_block = Filter(_lower_expressions_to_between(expression)) new_traversal.append(step._replace(where_block=new_where_block)) else: new_traversal.append(step) new_match_traversals.append(new_traversal) return match_query._replace(match_traversals=new_match_traversals)
python
def lower_comparisons_to_between(match_query): """Return a new MatchQuery, with all eligible comparison filters lowered to between clauses.""" new_match_traversals = [] for current_match_traversal in match_query.match_traversals: new_traversal = [] for step in current_match_traversal: if step.where_block: expression = step.where_block.predicate new_where_block = Filter(_lower_expressions_to_between(expression)) new_traversal.append(step._replace(where_block=new_where_block)) else: new_traversal.append(step) new_match_traversals.append(new_traversal) return match_query._replace(match_traversals=new_match_traversals)
[ "def", "lower_comparisons_to_between", "(", "match_query", ")", ":", "new_match_traversals", "=", "[", "]", "for", "current_match_traversal", "in", "match_query", ".", "match_traversals", ":", "new_traversal", "=", "[", "]", "for", "step", "in", "current_match_traversal", ":", "if", "step", ".", "where_block", ":", "expression", "=", "step", ".", "where_block", ".", "predicate", "new_where_block", "=", "Filter", "(", "_lower_expressions_to_between", "(", "expression", ")", ")", "new_traversal", ".", "append", "(", "step", ".", "_replace", "(", "where_block", "=", "new_where_block", ")", ")", "else", ":", "new_traversal", ".", "append", "(", "step", ")", "new_match_traversals", ".", "append", "(", "new_traversal", ")", "return", "match_query", ".", "_replace", "(", "match_traversals", "=", "new_match_traversals", ")" ]
Return a new MatchQuery, with all eligible comparison filters lowered to between clauses.
[ "Return", "a", "new", "MatchQuery", "with", "all", "eligible", "comparison", "filters", "lowered", "to", "between", "clauses", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/between_lowering.py#L106-L122
233,124
kensho-technologies/graphql-compiler
graphql_compiler/query_formatting/common.py
_ensure_arguments_are_provided
def _ensure_arguments_are_provided(expected_types, arguments): """Ensure that all arguments expected by the query were actually provided.""" # This function only checks that the arguments were specified, # and does not check types. Type checking is done as part of the actual formatting step. expected_arg_names = set(six.iterkeys(expected_types)) provided_arg_names = set(six.iterkeys(arguments)) if expected_arg_names != provided_arg_names: missing_args = expected_arg_names - provided_arg_names unexpected_args = provided_arg_names - expected_arg_names raise GraphQLInvalidArgumentError(u'Missing or unexpected arguments found: ' u'missing {}, unexpected ' u'{}'.format(missing_args, unexpected_args))
python
def _ensure_arguments_are_provided(expected_types, arguments): """Ensure that all arguments expected by the query were actually provided.""" # This function only checks that the arguments were specified, # and does not check types. Type checking is done as part of the actual formatting step. expected_arg_names = set(six.iterkeys(expected_types)) provided_arg_names = set(six.iterkeys(arguments)) if expected_arg_names != provided_arg_names: missing_args = expected_arg_names - provided_arg_names unexpected_args = provided_arg_names - expected_arg_names raise GraphQLInvalidArgumentError(u'Missing or unexpected arguments found: ' u'missing {}, unexpected ' u'{}'.format(missing_args, unexpected_args))
[ "def", "_ensure_arguments_are_provided", "(", "expected_types", ",", "arguments", ")", ":", "# This function only checks that the arguments were specified,", "# and does not check types. Type checking is done as part of the actual formatting step.", "expected_arg_names", "=", "set", "(", "six", ".", "iterkeys", "(", "expected_types", ")", ")", "provided_arg_names", "=", "set", "(", "six", ".", "iterkeys", "(", "arguments", ")", ")", "if", "expected_arg_names", "!=", "provided_arg_names", ":", "missing_args", "=", "expected_arg_names", "-", "provided_arg_names", "unexpected_args", "=", "provided_arg_names", "-", "expected_arg_names", "raise", "GraphQLInvalidArgumentError", "(", "u'Missing or unexpected arguments found: '", "u'missing {}, unexpected '", "u'{}'", ".", "format", "(", "missing_args", ",", "unexpected_args", ")", ")" ]
Ensure that all arguments expected by the query were actually provided.
[ "Ensure", "that", "all", "arguments", "expected", "by", "the", "query", "were", "actually", "provided", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/common.py#L12-L24
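The check reduces to plain set arithmetic over argument names, shown standalone below with ordinary sets in place of the compiler's metadata:

expected_arg_names = {'name', 'birthday'}
provided_arg_names = {'name', 'color'}

# Set differences identify both failure modes at once.
missing_args = expected_arg_names - provided_arg_names
unexpected_args = provided_arg_names - expected_arg_names
assert missing_args == {'birthday'} and unexpected_args == {'color'}
# A real call would raise GraphQLInvalidArgumentError naming both sets.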
233,125
kensho-technologies/graphql-compiler
graphql_compiler/query_formatting/common.py
insert_arguments_into_query
def insert_arguments_into_query(compilation_result, arguments): """Insert the arguments into the compiled GraphQL query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a query in the appropriate output language, with inserted argument data """ _ensure_arguments_are_provided(compilation_result.input_metadata, arguments) if compilation_result.language == MATCH_LANGUAGE: return insert_arguments_into_match_query(compilation_result, arguments) elif compilation_result.language == GREMLIN_LANGUAGE: return insert_arguments_into_gremlin_query(compilation_result, arguments) elif compilation_result.language == SQL_LANGUAGE: return insert_arguments_into_sql_query(compilation_result, arguments) else: raise AssertionError(u'Unrecognized language in compilation result: ' u'{}'.format(compilation_result))
python
def insert_arguments_into_query(compilation_result, arguments): """Insert the arguments into the compiled GraphQL query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a query in the appropriate output language, with inserted argument data """ _ensure_arguments_are_provided(compilation_result.input_metadata, arguments) if compilation_result.language == MATCH_LANGUAGE: return insert_arguments_into_match_query(compilation_result, arguments) elif compilation_result.language == GREMLIN_LANGUAGE: return insert_arguments_into_gremlin_query(compilation_result, arguments) elif compilation_result.language == SQL_LANGUAGE: return insert_arguments_into_sql_query(compilation_result, arguments) else: raise AssertionError(u'Unrecognized language in compilation result: ' u'{}'.format(compilation_result))
[ "def", "insert_arguments_into_query", "(", "compilation_result", ",", "arguments", ")", ":", "_ensure_arguments_are_provided", "(", "compilation_result", ".", "input_metadata", ",", "arguments", ")", "if", "compilation_result", ".", "language", "==", "MATCH_LANGUAGE", ":", "return", "insert_arguments_into_match_query", "(", "compilation_result", ",", "arguments", ")", "elif", "compilation_result", ".", "language", "==", "GREMLIN_LANGUAGE", ":", "return", "insert_arguments_into_gremlin_query", "(", "compilation_result", ",", "arguments", ")", "elif", "compilation_result", ".", "language", "==", "SQL_LANGUAGE", ":", "return", "insert_arguments_into_sql_query", "(", "compilation_result", ",", "arguments", ")", "else", ":", "raise", "AssertionError", "(", "u'Unrecognized language in compilation result: '", "u'{}'", ".", "format", "(", "compilation_result", ")", ")" ]
Insert the arguments into the compiled GraphQL query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a query in the appropriate output language, with inserted argument data
[ "Insert", "the", "arguments", "into", "the", "compiled", "GraphQL", "query", "to", "form", "a", "complete", "query", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/common.py#L31-L51
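The function is a dispatch on the compiled query's output language. A hedged sketch of the same shape, with the per-language formatters replaced by trivial placeholders; the real module delegates to the MATCH, Gremlin, and SQL formatters via an if/elif chain, and a dict works just as well for a fixed set of backends:

FORMATTERS = {
    u'MATCH': lambda query, args: u'{} /* MATCH args: {} */'.format(query, args),
    u'Gremlin': lambda query, args: u'{} // Gremlin args: {}'.format(query, args),
}

def insert_arguments(language, query, arguments):
    # Unknown languages indicate a programming error, hence the assertion.
    formatter = FORMATTERS.get(language)
    if formatter is None:
        raise AssertionError(u'Unrecognized language: {}'.format(language))
    return formatter(query, arguments)

print(insert_arguments(u'MATCH', u'SELECT ...', {u'min_worth': 5}))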
233,126
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
QueryRoot.validate
def validate(self): """Ensure that the QueryRoot block is valid.""" if not (isinstance(self.start_class, set) and all(isinstance(x, six.string_types) for x in self.start_class)): raise TypeError(u'Expected set of string start_class, got: {} {}'.format( type(self.start_class).__name__, self.start_class)) for cls in self.start_class: validate_safe_string(cls)
python
def validate(self): """Ensure that the QueryRoot block is valid.""" if not (isinstance(self.start_class, set) and all(isinstance(x, six.string_types) for x in self.start_class)): raise TypeError(u'Expected set of string start_class, got: {} {}'.format( type(self.start_class).__name__, self.start_class)) for cls in self.start_class: validate_safe_string(cls)
[ "def", "validate", "(", "self", ")", ":", "if", "not", "(", "isinstance", "(", "self", ".", "start_class", ",", "set", ")", "and", "all", "(", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", "for", "x", "in", "self", ".", "start_class", ")", ")", ":", "raise", "TypeError", "(", "u'Expected set of string start_class, got: {} {}'", ".", "format", "(", "type", "(", "self", ".", "start_class", ")", ".", "__name__", ",", "self", ".", "start_class", ")", ")", "for", "cls", "in", "self", ".", "start_class", ":", "validate_safe_string", "(", "cls", ")" ]
Ensure that the QueryRoot block is valid.
[ "Ensure", "that", "the", "QueryRoot", "block", "is", "valid", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L34-L42
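The same set-of-strings validation pattern recurs in CoerceType.validate below, with target_class in place of start_class. Extracted standalone, assuming six is available as it is in the source module:

import six

def validate_string_set(value, attribute_name):
    # Reject anything that is not a set, or a set with non-string members.
    if not (isinstance(value, set)
            and all(isinstance(x, six.string_types) for x in value)):
        raise TypeError(u'Expected set of string {}, got: {} {}'.format(
            attribute_name, type(value).__name__, value))

validate_string_set({u'Animal'}, u'start_class')      # passes silently
try:
    validate_string_set([u'Animal'], u'start_class')  # a list is rejected
except TypeError as error:
    print(error)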
233,127
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
CoerceType.validate
def validate(self): """Ensure that the CoerceType block is valid.""" if not (isinstance(self.target_class, set) and all(isinstance(x, six.string_types) for x in self.target_class)): raise TypeError(u'Expected set of string target_class, got: {} {}'.format( type(self.target_class).__name__, self.target_class)) for cls in self.target_class: validate_safe_string(cls)
python
def validate(self): """Ensure that the CoerceType block is valid.""" if not (isinstance(self.target_class, set) and all(isinstance(x, six.string_types) for x in self.target_class)): raise TypeError(u'Expected set of string target_class, got: {} {}'.format( type(self.target_class).__name__, self.target_class)) for cls in self.target_class: validate_safe_string(cls)
[ "def", "validate", "(", "self", ")", ":", "if", "not", "(", "isinstance", "(", "self", ".", "target_class", ",", "set", ")", "and", "all", "(", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", "for", "x", "in", "self", ".", "target_class", ")", ")", ":", "raise", "TypeError", "(", "u'Expected set of string target_class, got: {} {}'", ".", "format", "(", "type", "(", "self", ".", "target_class", ")", ".", "__name__", ",", "self", ".", "target_class", ")", ")", "for", "cls", "in", "self", ".", "target_class", ":", "validate_safe_string", "(", "cls", ")" ]
Ensure that the CoerceType block is valid.
[ "Ensure", "that", "the", "CoerceType", "block", "is", "valid", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L79-L87
233,128
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
ConstructResult.validate
def validate(self): """Ensure that the ConstructResult block is valid.""" if not isinstance(self.fields, dict): raise TypeError(u'Expected dict fields, got: {} {}'.format( type(self.fields).__name__, self.fields)) for key, value in six.iteritems(self.fields): validate_safe_string(key) if not isinstance(value, Expression): raise TypeError( u'Expected Expression values in the fields dict, got: ' u'{} -> {}'.format(key, value))
python
def validate(self): """Ensure that the ConstructResult block is valid.""" if not isinstance(self.fields, dict): raise TypeError(u'Expected dict fields, got: {} {}'.format( type(self.fields).__name__, self.fields)) for key, value in six.iteritems(self.fields): validate_safe_string(key) if not isinstance(value, Expression): raise TypeError( u'Expected Expression values in the fields dict, got: ' u'{} -> {}'.format(key, value))
[ "def", "validate", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "fields", ",", "dict", ")", ":", "raise", "TypeError", "(", "u'Expected dict fields, got: {} {}'", ".", "format", "(", "type", "(", "self", ".", "fields", ")", ".", "__name__", ",", "self", ".", "fields", ")", ")", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "self", ".", "fields", ")", ":", "validate_safe_string", "(", "key", ")", "if", "not", "isinstance", "(", "value", ",", "Expression", ")", ":", "raise", "TypeError", "(", "u'Expected Expression values in the fields dict, got: '", "u'{} -> {}'", ".", "format", "(", "key", ",", "value", ")", ")" ]
Ensure that the ConstructResult block is valid.
[ "Ensure", "that", "the", "ConstructResult", "block", "is", "valid", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L120-L131
233,129
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
Filter.validate
def validate(self): """Ensure that the Filter block is valid.""" if not isinstance(self.predicate, Expression): raise TypeError(u'Expected Expression predicate, got: {} {}'.format( type(self.predicate).__name__, self.predicate))
python
def validate(self): """Ensure that the Filter block is valid.""" if not isinstance(self.predicate, Expression): raise TypeError(u'Expected Expression predicate, got: {} {}'.format( type(self.predicate).__name__, self.predicate))
[ "def", "validate", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "predicate", ",", "Expression", ")", ":", "raise", "TypeError", "(", "u'Expected Expression predicate, got: {} {}'", ".", "format", "(", "type", "(", "self", ".", "predicate", ")", ".", "__name__", ",", "self", ".", "predicate", ")", ")" ]
Ensure that the Filter block is valid.
[ "Ensure", "that", "the", "Filter", "block", "is", "valid", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L174-L178
233,130
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
Backtrack.validate
def validate(self): """Ensure that the Backtrack block is valid.""" validate_marked_location(self.location) if not isinstance(self.optional, bool): raise TypeError(u'Expected bool optional, got: {} {}'.format( type(self.optional).__name__, self.optional))
python
def validate(self): """Ensure that the Backtrack block is valid.""" validate_marked_location(self.location) if not isinstance(self.optional, bool): raise TypeError(u'Expected bool optional, got: {} {}'.format( type(self.optional).__name__, self.optional))
[ "def", "validate", "(", "self", ")", ":", "validate_marked_location", "(", "self", ".", "location", ")", "if", "not", "isinstance", "(", "self", ".", "optional", ",", "bool", ")", ":", "raise", "TypeError", "(", "u'Expected bool optional, got: {} {}'", ".", "format", "(", "type", "(", "self", ".", "optional", ")", ".", "__name__", ",", "self", ".", "optional", ")", ")" ]
Ensure that the Backtrack block is valid.
[ "Ensure", "that", "the", "Backtrack", "block", "is", "valid", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L395-L400
233,131
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
Backtrack.to_gremlin
def to_gremlin(self): """Return a unicode object with the Gremlin representation of this BasicBlock.""" self.validate() if self.optional: operation = u'optional' else: operation = u'back' mark_name, _ = self.location.get_location_name() return u'{operation}({mark_name})'.format( operation=operation, mark_name=safe_quoted_string(mark_name))
python
def to_gremlin(self): """Return a unicode object with the Gremlin representation of this BasicBlock.""" self.validate() if self.optional: operation = u'optional' else: operation = u'back' mark_name, _ = self.location.get_location_name() return u'{operation}({mark_name})'.format( operation=operation, mark_name=safe_quoted_string(mark_name))
[ "def", "to_gremlin", "(", "self", ")", ":", "self", ".", "validate", "(", ")", "if", "self", ".", "optional", ":", "operation", "=", "u'optional'", "else", ":", "operation", "=", "u'back'", "mark_name", ",", "_", "=", "self", ".", "location", ".", "get_location_name", "(", ")", "return", "u'{operation}({mark_name})'", ".", "format", "(", "operation", "=", "operation", ",", "mark_name", "=", "safe_quoted_string", "(", "mark_name", ")", ")" ]
Return a unicode object with the Gremlin representation of this BasicBlock.
[ "Return", "a", "unicode", "object", "with", "the", "Gremlin", "representation", "of", "this", "BasicBlock", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L402-L414
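A worked illustration of the two Gremlin fragments this method can emit; the mark name is hard-coded since no real Location object is constructed here, and safe_quoted_string is approximated with manual single quotes:

mark_name = u'Animal___1'
for optional_flag in (False, True):
    # optional=False backtracks with back(...); optional=True uses optional(...).
    operation = u'optional' if optional_flag else u'back'
    print(u"{}('{}')".format(operation, mark_name))
# back('Animal___1')
# optional('Animal___1')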
233,132
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
Fold.validate
def validate(self): """Ensure the Fold block is valid.""" if not isinstance(self.fold_scope_location, FoldScopeLocation): raise TypeError(u'Expected a FoldScopeLocation for fold_scope_location, got: {} ' u'{}'.format(type(self.fold_scope_location), self.fold_scope_location))
python
def validate(self): """Ensure the Fold block is valid.""" if not isinstance(self.fold_scope_location, FoldScopeLocation): raise TypeError(u'Expected a FoldScopeLocation for fold_scope_location, got: {} ' u'{}'.format(type(self.fold_scope_location), self.fold_scope_location))
[ "def", "validate", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "fold_scope_location", ",", "FoldScopeLocation", ")", ":", "raise", "TypeError", "(", "u'Expected a FoldScopeLocation for fold_scope_location, got: {} '", "u'{}'", ".", "format", "(", "type", "(", "self", ".", "fold_scope_location", ")", ",", "self", ".", "fold_scope_location", ")", ")" ]
Ensure the Fold block is valid.
[ "Ensure", "the", "Fold", "block", "is", "valid", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L446-L450
233,133
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
lower_ir
def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None): """Lower the IR blocks into a form that can be represented by a SQL query. Args: ir_blocks: list of IR blocks to lower into SQL-compatible form query_metadata_table: QueryMetadataTable object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: tree representation of IR blocks for recursive traversal by SQL backend. """ _validate_all_blocks_supported(ir_blocks, query_metadata_table) construct_result = _get_construct_result(ir_blocks) query_path_to_location_info = _map_query_path_to_location_info(query_metadata_table) query_path_to_output_fields = _map_query_path_to_outputs( construct_result, query_path_to_location_info) block_index_to_location = _map_block_index_to_location(ir_blocks) # perform lowering steps ir_blocks = lower_unary_transformations(ir_blocks) ir_blocks = lower_unsupported_metafield_expressions(ir_blocks) # iteratively construct SqlTree query_path_to_node = {} query_path_to_filters = {} tree_root = None for index, block in enumerate(ir_blocks): if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES): continue location = block_index_to_location[index] if isinstance(block, (blocks.QueryRoot,)): query_path = location.query_path if tree_root is not None: raise AssertionError( u'Encountered QueryRoot {} but tree root is already set to {} during ' u'construction of SQL query tree for IR blocks {} with query ' u'metadata table {}'.format( block, tree_root, ir_blocks, query_metadata_table)) tree_root = SqlNode(block=block, query_path=query_path) query_path_to_node[query_path] = tree_root elif isinstance(block, blocks.Filter): query_path_to_filters.setdefault(query_path, []).append(block) else: raise AssertionError( u'Unsupported block {} unexpectedly passed validation for IR blocks ' u'{} with query metadata table {} .'.format(block, ir_blocks, query_metadata_table)) return SqlQueryTree(tree_root, query_path_to_location_info, query_path_to_output_fields, query_path_to_filters, query_path_to_node)
python
def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None): """Lower the IR blocks into a form that can be represented by a SQL query. Args: ir_blocks: list of IR blocks to lower into SQL-compatible form query_metadata_table: QueryMetadataTable object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: tree representation of IR blocks for recursive traversal by SQL backend. """ _validate_all_blocks_supported(ir_blocks, query_metadata_table) construct_result = _get_construct_result(ir_blocks) query_path_to_location_info = _map_query_path_to_location_info(query_metadata_table) query_path_to_output_fields = _map_query_path_to_outputs( construct_result, query_path_to_location_info) block_index_to_location = _map_block_index_to_location(ir_blocks) # perform lowering steps ir_blocks = lower_unary_transformations(ir_blocks) ir_blocks = lower_unsupported_metafield_expressions(ir_blocks) # iteratively construct SqlTree query_path_to_node = {} query_path_to_filters = {} tree_root = None for index, block in enumerate(ir_blocks): if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES): continue location = block_index_to_location[index] if isinstance(block, (blocks.QueryRoot,)): query_path = location.query_path if tree_root is not None: raise AssertionError( u'Encountered QueryRoot {} but tree root is already set to {} during ' u'construction of SQL query tree for IR blocks {} with query ' u'metadata table {}'.format( block, tree_root, ir_blocks, query_metadata_table)) tree_root = SqlNode(block=block, query_path=query_path) query_path_to_node[query_path] = tree_root elif isinstance(block, blocks.Filter): query_path_to_filters.setdefault(query_path, []).append(block) else: raise AssertionError( u'Unsupported block {} unexpectedly passed validation for IR blocks ' u'{} with query metadata table {} .'.format(block, ir_blocks, query_metadata_table)) return SqlQueryTree(tree_root, query_path_to_location_info, query_path_to_output_fields, query_path_to_filters, query_path_to_node)
[ "def", "lower_ir", "(", "ir_blocks", ",", "query_metadata_table", ",", "type_equivalence_hints", "=", "None", ")", ":", "_validate_all_blocks_supported", "(", "ir_blocks", ",", "query_metadata_table", ")", "construct_result", "=", "_get_construct_result", "(", "ir_blocks", ")", "query_path_to_location_info", "=", "_map_query_path_to_location_info", "(", "query_metadata_table", ")", "query_path_to_output_fields", "=", "_map_query_path_to_outputs", "(", "construct_result", ",", "query_path_to_location_info", ")", "block_index_to_location", "=", "_map_block_index_to_location", "(", "ir_blocks", ")", "# perform lowering steps", "ir_blocks", "=", "lower_unary_transformations", "(", "ir_blocks", ")", "ir_blocks", "=", "lower_unsupported_metafield_expressions", "(", "ir_blocks", ")", "# iteratively construct SqlTree", "query_path_to_node", "=", "{", "}", "query_path_to_filters", "=", "{", "}", "tree_root", "=", "None", "for", "index", ",", "block", "in", "enumerate", "(", "ir_blocks", ")", ":", "if", "isinstance", "(", "block", ",", "constants", ".", "SKIPPABLE_BLOCK_TYPES", ")", ":", "continue", "location", "=", "block_index_to_location", "[", "index", "]", "if", "isinstance", "(", "block", ",", "(", "blocks", ".", "QueryRoot", ",", ")", ")", ":", "query_path", "=", "location", ".", "query_path", "if", "tree_root", "is", "not", "None", ":", "raise", "AssertionError", "(", "u'Encountered QueryRoot {} but tree root is already set to {} during '", "u'construction of SQL query tree for IR blocks {} with query '", "u'metadata table {}'", ".", "format", "(", "block", ",", "tree_root", ",", "ir_blocks", ",", "query_metadata_table", ")", ")", "tree_root", "=", "SqlNode", "(", "block", "=", "block", ",", "query_path", "=", "query_path", ")", "query_path_to_node", "[", "query_path", "]", "=", "tree_root", "elif", "isinstance", "(", "block", ",", "blocks", ".", "Filter", ")", ":", "query_path_to_filters", ".", "setdefault", "(", "query_path", ",", "[", "]", ")", ".", "append", "(", "block", ")", "else", ":", "raise", "AssertionError", "(", "u'Unsupported block {} unexpectedly passed validation for IR blocks '", "u'{} with query metadata table {} .'", ".", "format", "(", "block", ",", "ir_blocks", ",", "query_metadata_table", ")", ")", "return", "SqlQueryTree", "(", "tree_root", ",", "query_path_to_location_info", ",", "query_path_to_output_fields", ",", "query_path_to_filters", ",", "query_path_to_node", ")" ]
Lower the IR blocks into a form that can be represented by a SQL query. Args: ir_blocks: list of IR blocks to lower into SQL-compatible form query_metadata_table: QueryMetadataTable object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: tree representation of IR blocks for recursive traversal by SQL backend.
[ "Lower", "the", "IR", "blocks", "into", "a", "form", "that", "can", "be", "represented", "by", "a", "SQL", "query", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L17-L81
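The docstring above describes the shape of type_equivalence_hints at length but gives no concrete value. Below is a minimal sketch of one, assuming graphql-core 2.x; the Person/Speaker types and the union name are hypothetical and exist only to show the key -> value relationship the docstring describes.

from graphql import GraphQLField, GraphQLObjectType, GraphQLString, GraphQLUnionType

# Hypothetical GraphQL types used only for illustration.
person = GraphQLObjectType('Person', {'name': GraphQLField(GraphQLString)})
speaker = GraphQLObjectType('Speaker', {'name': GraphQLField(GraphQLString)})

# The key type is declared equivalent to the union of its subtypes (the value).
type_equivalence_hints = {
    person: GraphQLUnionType('Union__Person__Speaker', [person, speaker]),
}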
233,134
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
_validate_all_blocks_supported
def _validate_all_blocks_supported(ir_blocks, query_metadata_table): """Validate that all IR blocks and ConstructResult fields passed to the backend are supported. Args: ir_blocks: List[BasicBlock], IR blocks to validate. query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Raises: NotImplementedError, if any block or ConstructResult field is unsupported. """ if len(ir_blocks) < 3: raise AssertionError( u'Unexpectedly attempting to validate IR blocks with fewer than 3 blocks. A minimal ' u'query is expected to have at least a QueryRoot, GlobalOperationsStart, and ' u'ConstructResult block. The query metadata table is {}.'.format(query_metadata_table)) construct_result = _get_construct_result(ir_blocks) unsupported_blocks = [] unsupported_fields = [] for block in ir_blocks[:-1]: if isinstance(block, constants.SUPPORTED_BLOCK_TYPES): continue if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES): continue unsupported_blocks.append(block) for field_name, field in six.iteritems(construct_result.fields): if not isinstance(field, constants.SUPPORTED_OUTPUT_EXPRESSION_TYPES): unsupported_fields.append((field_name, field)) elif field.location.field in constants.UNSUPPORTED_META_FIELDS: unsupported_fields.append((field_name, field)) if len(unsupported_blocks) > 0 or len(unsupported_fields) > 0: raise NotImplementedError( u'Encountered unsupported blocks {} and unsupported fields {} during construction of ' u'SQL query tree for IR blocks {} with query metadata table {}.'.format( unsupported_blocks, unsupported_fields, ir_blocks, query_metadata_table))
python
def _validate_all_blocks_supported(ir_blocks, query_metadata_table): """Validate that all IR blocks and ConstructResult fields passed to the backend are supported. Args: ir_blocks: List[BasicBlock], IR blocks to validate. query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Raises: NotImplementedError, if any block or ConstructResult field is unsupported. """ if len(ir_blocks) < 3: raise AssertionError( u'Unexpectedly attempting to validate IR blocks with fewer than 3 blocks. A minimal ' u'query is expected to have at least a QueryRoot, GlobalOperationsStart, and ' u'ConstructResult block. The query metadata table is {}.'.format(query_metadata_table)) construct_result = _get_construct_result(ir_blocks) unsupported_blocks = [] unsupported_fields = [] for block in ir_blocks[:-1]: if isinstance(block, constants.SUPPORTED_BLOCK_TYPES): continue if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES): continue unsupported_blocks.append(block) for field_name, field in six.iteritems(construct_result.fields): if not isinstance(field, constants.SUPPORTED_OUTPUT_EXPRESSION_TYPES): unsupported_fields.append((field_name, field)) elif field.location.field in constants.UNSUPPORTED_META_FIELDS: unsupported_fields.append((field_name, field)) if len(unsupported_blocks) > 0 or len(unsupported_fields) > 0: raise NotImplementedError( u'Encountered unsupported blocks {} and unsupported fields {} during construction of ' u'SQL query tree for IR blocks {} with query metadata table {}.'.format( unsupported_blocks, unsupported_fields, ir_blocks, query_metadata_table))
[ "def", "_validate_all_blocks_supported", "(", "ir_blocks", ",", "query_metadata_table", ")", ":", "if", "len", "(", "ir_blocks", ")", "<", "3", ":", "raise", "AssertionError", "(", "u'Unexpectedly attempting to validate IR blocks with fewer than 3 blocks. A minimal '", "u'query is expected to have at least a QueryRoot, GlobalOperationsStart, and '", "u'ConstructResult block. The query metadata table is {}.'", ".", "format", "(", "query_metadata_table", ")", ")", "construct_result", "=", "_get_construct_result", "(", "ir_blocks", ")", "unsupported_blocks", "=", "[", "]", "unsupported_fields", "=", "[", "]", "for", "block", "in", "ir_blocks", "[", ":", "-", "1", "]", ":", "if", "isinstance", "(", "block", ",", "constants", ".", "SUPPORTED_BLOCK_TYPES", ")", ":", "continue", "if", "isinstance", "(", "block", ",", "constants", ".", "SKIPPABLE_BLOCK_TYPES", ")", ":", "continue", "unsupported_blocks", ".", "append", "(", "block", ")", "for", "field_name", ",", "field", "in", "six", ".", "iteritems", "(", "construct_result", ".", "fields", ")", ":", "if", "not", "isinstance", "(", "field", ",", "constants", ".", "SUPPORTED_OUTPUT_EXPRESSION_TYPES", ")", ":", "unsupported_fields", ".", "append", "(", "(", "field_name", ",", "field", ")", ")", "elif", "field", ".", "location", ".", "field", "in", "constants", ".", "UNSUPPORTED_META_FIELDS", ":", "unsupported_fields", ".", "append", "(", "(", "field_name", ",", "field", ")", ")", "if", "len", "(", "unsupported_blocks", ")", ">", "0", "or", "len", "(", "unsupported_fields", ")", ">", "0", ":", "raise", "NotImplementedError", "(", "u'Encountered unsupported blocks {} and unsupported fields {} during construction of '", "u'SQL query tree for IR blocks {} with query metadata table {}.'", ".", "format", "(", "unsupported_blocks", ",", "unsupported_fields", ",", "ir_blocks", ",", "query_metadata_table", ")", ")" ]
Validate that all IR blocks and ConstructResult fields passed to the backend are supported. Args: ir_blocks: List[BasicBlock], IR blocks to validate. query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Raises: NotImplementedError, if any block or ConstructResult field is unsupported.
[ "Validate", "that", "all", "IR", "blocks", "and", "ConstructResult", "fields", "passed", "to", "the", "backend", "are", "supported", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L84-L121
233,135
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
_get_construct_result
def _get_construct_result(ir_blocks): """Return the ConstructResult block from a list of IR blocks.""" last_block = ir_blocks[-1] if not isinstance(last_block, blocks.ConstructResult): raise AssertionError( u'The last IR block {} for IR blocks {} was unexpectedly not ' u'a ConstructResult block.'.format(last_block, ir_blocks)) return last_block
python
def _get_construct_result(ir_blocks): """Return the ConstructResult block from a list of IR blocks.""" last_block = ir_blocks[-1] if not isinstance(last_block, blocks.ConstructResult): raise AssertionError( u'The last IR block {} for IR blocks {} was unexpectedly not ' u'a ConstructResult block.'.format(last_block, ir_blocks)) return last_block
[ "def", "_get_construct_result", "(", "ir_blocks", ")", ":", "last_block", "=", "ir_blocks", "[", "-", "1", "]", "if", "not", "isinstance", "(", "last_block", ",", "blocks", ".", "ConstructResult", ")", ":", "raise", "AssertionError", "(", "u'The last IR block {} for IR blocks {} was unexpectedly not '", "u'a ConstructResult block.'", ".", "format", "(", "last_block", ",", "ir_blocks", ")", ")", "return", "last_block" ]
Return the ConstructResult block from a list of IR blocks.
[ "Return", "the", "ConstructResult", "block", "from", "a", "list", "of", "IR", "blocks", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L124-L131
233,136
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
_map_query_path_to_location_info
def _map_query_path_to_location_info(query_metadata_table): """Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path. """ query_path_to_location_info = {} for location, location_info in query_metadata_table.registered_locations: if not isinstance(location, Location): continue if location.query_path in query_path_to_location_info: # make sure the stored location information equals the new location information # for the fields the SQL backend requires. equivalent_location_info = query_path_to_location_info[location.query_path] if not _location_infos_equal(location_info, equivalent_location_info): raise AssertionError( u'Differing LocationInfos at query_path {} between {} and {}. Expected ' u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth ' u'and types to be equal for LocationInfos sharing the same query path.'.format( location.query_path, location_info, equivalent_location_info)) query_path_to_location_info[location.query_path] = location_info return query_path_to_location_info
python
def _map_query_path_to_location_info(query_metadata_table): """Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path. """ query_path_to_location_info = {} for location, location_info in query_metadata_table.registered_locations: if not isinstance(location, Location): continue if location.query_path in query_path_to_location_info: # make sure the stored location information equals the new location information # for the fields the SQL backend requires. equivalent_location_info = query_path_to_location_info[location.query_path] if not _location_infos_equal(location_info, equivalent_location_info): raise AssertionError( u'Differing LocationInfos at query_path {} between {} and {}. Expected ' u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth ' u'and types to be equal for LocationInfos sharing the same query path.'.format( location.query_path, location_info, equivalent_location_info)) query_path_to_location_info[location.query_path] = location_info return query_path_to_location_info
[ "def", "_map_query_path_to_location_info", "(", "query_metadata_table", ")", ":", "query_path_to_location_info", "=", "{", "}", "for", "location", ",", "location_info", "in", "query_metadata_table", ".", "registered_locations", ":", "if", "not", "isinstance", "(", "location", ",", "Location", ")", ":", "continue", "if", "location", ".", "query_path", "in", "query_path_to_location_info", ":", "# make sure the stored location information equals the new location information", "# for the fields the SQL backend requires.", "equivalent_location_info", "=", "query_path_to_location_info", "[", "location", ".", "query_path", "]", "if", "not", "_location_infos_equal", "(", "location_info", ",", "equivalent_location_info", ")", ":", "raise", "AssertionError", "(", "u'Differing LocationInfos at query_path {} between {} and {}. Expected '", "u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth '", "u'and types to be equal for LocationInfos sharing the same query path.'", ".", "format", "(", "location", ".", "query_path", ",", "location_info", ",", "equivalent_location_info", ")", ")", "query_path_to_location_info", "[", "location", ".", "query_path", "]", "=", "location_info", "return", "query_path_to_location_info" ]
Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
[ "Create", "a", "map", "from", "each", "query", "path", "to", "a", "LocationInfo", "at", "that", "path", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L134-L161
233,137
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
_location_infos_equal
def _location_infos_equal(left, right): """Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise. LocationInfo objects are considered equal for the SQL backend iff the optional scopes depth, recursive scopes depth, types and parent query paths are equal. Args: left: LocationInfo, left location info object to compare. right: LocationInfo, right location info object to compare. Returns: bool, True if LocationInfo objects equivalent, False otherwise. """ if not isinstance(left, LocationInfo) or not isinstance(right, LocationInfo): raise AssertionError( u'Unsupported LocationInfo comparison between types {} and {} ' u'with values {}, {}'.format(type(left), type(right), left, right)) optional_scopes_depth_equal = (left.optional_scopes_depth == right.optional_scopes_depth) parent_query_paths_equal = ( (left.parent_location is None and right.parent_location is None) or (left.parent_location.query_path == right.parent_location.query_path)) recursive_scopes_depths_equal = (left.recursive_scopes_depth == right.recursive_scopes_depth) types_equal = left.type == right.type return all([ optional_scopes_depth_equal, parent_query_paths_equal, recursive_scopes_depths_equal, types_equal, ])
python
def _location_infos_equal(left, right): """Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise. LocationInfo objects are considered equal for the SQL backend iff the optional scopes depth, recursive scopes depth, types and parent query paths are equal. Args: left: LocationInfo, left location info object to compare. right: LocationInfo, right location info object to compare. Returns: bool, True if LocationInfo objects equivalent, False otherwise. """ if not isinstance(left, LocationInfo) or not isinstance(right, LocationInfo): raise AssertionError( u'Unsupported LocationInfo comparison between types {} and {} ' u'with values {}, {}'.format(type(left), type(right), left, right)) optional_scopes_depth_equal = (left.optional_scopes_depth == right.optional_scopes_depth) parent_query_paths_equal = ( (left.parent_location is None and right.parent_location is None) or (left.parent_location.query_path == right.parent_location.query_path)) recursive_scopes_depths_equal = (left.recursive_scopes_depth == right.recursive_scopes_depth) types_equal = left.type == right.type return all([ optional_scopes_depth_equal, parent_query_paths_equal, recursive_scopes_depths_equal, types_equal, ])
[ "def", "_location_infos_equal", "(", "left", ",", "right", ")", ":", "if", "not", "isinstance", "(", "left", ",", "LocationInfo", ")", "or", "not", "isinstance", "(", "right", ",", "LocationInfo", ")", ":", "raise", "AssertionError", "(", "u'Unsupported LocationInfo comparison between types {} and {} '", "u'with values {}, {}'", ".", "format", "(", "type", "(", "left", ")", ",", "type", "(", "right", ")", ",", "left", ",", "right", ")", ")", "optional_scopes_depth_equal", "=", "(", "left", ".", "optional_scopes_depth", "==", "right", ".", "optional_scopes_depth", ")", "parent_query_paths_equal", "=", "(", "(", "left", ".", "parent_location", "is", "None", "and", "right", ".", "parent_location", "is", "None", ")", "or", "(", "left", ".", "parent_location", ".", "query_path", "==", "right", ".", "parent_location", ".", "query_path", ")", ")", "recursive_scopes_depths_equal", "=", "(", "left", ".", "recursive_scopes_depth", "==", "right", ".", "recursive_scopes_depth", ")", "types_equal", "=", "left", ".", "type", "==", "right", ".", "type", "return", "all", "(", "[", "optional_scopes_depth_equal", ",", "parent_query_paths_equal", ",", "recursive_scopes_depths_equal", ",", "types_equal", ",", "]", ")" ]
Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise. LocationInfo objects are considered equal for the SQL backend iff the optional scopes depth, recursive scopes depth, types and parent query paths are equal. Args: left: LocationInfo, left location info object to compare. right: LocationInfo, right location info object to compare. Returns: bool, True if LocationInfo objects equivalent, False otherwise.
[ "Return", "True", "if", "LocationInfo", "objects", "are", "equivalent", "for", "the", "SQL", "backend", "False", "otherwise", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L164-L196
233,138
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
_map_query_path_to_outputs
def _map_query_path_to_outputs(construct_result, query_path_to_location_info): """Assign the output fields of a ConstructResult block to their respective query_path.""" query_path_to_output_fields = {} for output_name, field in six.iteritems(construct_result.fields): field_name = field.location.field output_query_path = field.location.query_path output_field_info = constants.SqlOutput( field_name=field_name, output_name=output_name, graphql_type=query_path_to_location_info[output_query_path].type) output_field_mapping = query_path_to_output_fields.setdefault(output_query_path, []) output_field_mapping.append(output_field_info) return query_path_to_output_fields
python
def _map_query_path_to_outputs(construct_result, query_path_to_location_info): """Assign the output fields of a ConstructResult block to their respective query_path.""" query_path_to_output_fields = {} for output_name, field in six.iteritems(construct_result.fields): field_name = field.location.field output_query_path = field.location.query_path output_field_info = constants.SqlOutput( field_name=field_name, output_name=output_name, graphql_type=query_path_to_location_info[output_query_path].type) output_field_mapping = query_path_to_output_fields.setdefault(output_query_path, []) output_field_mapping.append(output_field_info) return query_path_to_output_fields
[ "def", "_map_query_path_to_outputs", "(", "construct_result", ",", "query_path_to_location_info", ")", ":", "query_path_to_output_fields", "=", "{", "}", "for", "output_name", ",", "field", "in", "six", ".", "iteritems", "(", "construct_result", ".", "fields", ")", ":", "field_name", "=", "field", ".", "location", ".", "field", "output_query_path", "=", "field", ".", "location", ".", "query_path", "output_field_info", "=", "constants", ".", "SqlOutput", "(", "field_name", "=", "field_name", ",", "output_name", "=", "output_name", ",", "graphql_type", "=", "query_path_to_location_info", "[", "output_query_path", "]", ".", "type", ")", "output_field_mapping", "=", "query_path_to_output_fields", ".", "setdefault", "(", "output_query_path", ",", "[", "]", ")", "output_field_mapping", ".", "append", "(", "output_field_info", ")", "return", "query_path_to_output_fields" ]
Assign the output fields of a ConstructResult block to their respective query_path.
[ "Assign", "the", "output", "fields", "of", "a", "ConstructResult", "block", "to", "their", "respective", "query_path", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L199-L211
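_map_query_path_to_outputs groups output fields per query path with the setdefault-and-append idiom. A self-contained demonstration of that idiom, using made-up query paths and output names in place of the real IR objects:

outputs = [
    (('Animal',), 'animal_name'),
    (('Animal', 'out_Animal_ParentOf'), 'child_name'),
    (('Animal',), 'animal_uuid'),
]

query_path_to_output_fields = {}
for query_path, output_name in outputs:
    # setdefault returns the existing list for the key, or installs a new one.
    query_path_to_output_fields.setdefault(query_path, []).append(output_name)

# {('Animal',): ['animal_name', 'animal_uuid'],
#  ('Animal', 'out_Animal_ParentOf'): ['child_name']}
print(query_path_to_output_fields)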
233,139
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
_map_block_index_to_location
def _map_block_index_to_location(ir_blocks): """Associate each IR block with its corresponding location, by index.""" block_index_to_location = {} # MarkLocation blocks occur after the blocks related to that location. # The core approach here is to buffer blocks until their MarkLocation is encountered # after which all buffered blocks can be associated with the encountered MarkLocation.location. current_block_ixs = [] for num, ir_block in enumerate(ir_blocks): if isinstance(ir_block, blocks.GlobalOperationsStart): if len(current_block_ixs) > 0: unassociated_blocks = [ir_blocks[ix] for ix in current_block_ixs] raise AssertionError( u'Unexpectedly encountered global operations before mapping blocks ' u'{} to their respective locations.'.format(unassociated_blocks)) break current_block_ixs.append(num) if isinstance(ir_block, blocks.MarkLocation): for ix in current_block_ixs: block_index_to_location[ix] = ir_block.location current_block_ixs = [] return block_index_to_location
python
def _map_block_index_to_location(ir_blocks): """Associate each IR block with its corresponding location, by index.""" block_index_to_location = {} # MarkLocation blocks occur after the blocks related to that location. # The core approach here is to buffer blocks until their MarkLocation is encountered # after which all buffered blocks can be associated with the encountered MarkLocation.location. current_block_ixs = [] for num, ir_block in enumerate(ir_blocks): if isinstance(ir_block, blocks.GlobalOperationsStart): if len(current_block_ixs) > 0: unassociated_blocks = [ir_blocks[ix] for ix in current_block_ixs] raise AssertionError( u'Unexpectedly encountered global operations before mapping blocks ' u'{} to their respective locations.'.format(unassociated_blocks)) break current_block_ixs.append(num) if isinstance(ir_block, blocks.MarkLocation): for ix in current_block_ixs: block_index_to_location[ix] = ir_block.location current_block_ixs = [] return block_index_to_location
[ "def", "_map_block_index_to_location", "(", "ir_blocks", ")", ":", "block_index_to_location", "=", "{", "}", "# MarkLocation blocks occur after the blocks related to that location.", "# The core approach here is to buffer blocks until their MarkLocation is encountered", "# after which all buffered blocks can be associated with the encountered MarkLocation.location.", "current_block_ixs", "=", "[", "]", "for", "num", ",", "ir_block", "in", "enumerate", "(", "ir_blocks", ")", ":", "if", "isinstance", "(", "ir_block", ",", "blocks", ".", "GlobalOperationsStart", ")", ":", "if", "len", "(", "current_block_ixs", ")", ">", "0", ":", "unassociated_blocks", "=", "[", "ir_blocks", "[", "ix", "]", "for", "ix", "in", "current_block_ixs", "]", "raise", "AssertionError", "(", "u'Unexpectedly encountered global operations before mapping blocks '", "u'{} to their respective locations.'", ".", "format", "(", "unassociated_blocks", ")", ")", "break", "current_block_ixs", ".", "append", "(", "num", ")", "if", "isinstance", "(", "ir_block", ",", "blocks", ".", "MarkLocation", ")", ":", "for", "ix", "in", "current_block_ixs", ":", "block_index_to_location", "[", "ix", "]", "=", "ir_block", ".", "location", "current_block_ixs", "=", "[", "]", "return", "block_index_to_location" ]
Associate each IR block with its corresponding location, by index.
[ "Associate", "each", "IR", "block", "with", "its", "corresponding", "location", "by", "index", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L214-L234
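The buffering approach described in _map_block_index_to_location's comment can be seen in isolation with toy stand-ins for the compiler's block classes (the classes below are illustrative, not the real ones):

class Filter(object):
    pass

class MarkLocation(object):
    def __init__(self, location):
        self.location = location

toy_blocks = [Filter(), Filter(), MarkLocation('loc_a'), Filter(), MarkLocation('loc_b')]

block_index_to_location = {}
buffered_indices = []
for index, block in enumerate(toy_blocks):
    buffered_indices.append(index)
    if isinstance(block, MarkLocation):
        # Flush the buffer: every block seen since the previous MarkLocation
        # belongs to this MarkLocation's location.
        for ix in buffered_indices:
            block_index_to_location[ix] = block.location
        buffered_indices = []

# {0: 'loc_a', 1: 'loc_a', 2: 'loc_a', 3: 'loc_b', 4: 'loc_b'}
print(block_index_to_location)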
233,140
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
lower_unary_transformations
def lower_unary_transformations(ir_blocks): """Raise exception if any unary transformation block encountered.""" def visitor_fn(expression): """Raise error if current expression is a UnaryTransformation.""" if not isinstance(expression, expressions.UnaryTransformation): return expression raise NotImplementedError( u'UnaryTransformation expression "{}" encountered with IR blocks {} is unsupported by ' u'the SQL backend.'.format(expression, ir_blocks) ) new_ir_blocks = [ block.visit_and_update_expressions(visitor_fn) for block in ir_blocks ] return new_ir_blocks
python
def lower_unary_transformations(ir_blocks): """Raise exception if any unary transformation block encountered.""" def visitor_fn(expression): """Raise error if current expression is a UnaryTransformation.""" if not isinstance(expression, expressions.UnaryTransformation): return expression raise NotImplementedError( u'UnaryTransformation expression "{}" encountered with IR blocks {} is unsupported by ' u'the SQL backend.'.format(expression, ir_blocks) ) new_ir_blocks = [ block.visit_and_update_expressions(visitor_fn) for block in ir_blocks ] return new_ir_blocks
[ "def", "lower_unary_transformations", "(", "ir_blocks", ")", ":", "def", "visitor_fn", "(", "expression", ")", ":", "\"\"\"Raise error if current expression is a UnaryTransformation.\"\"\"", "if", "not", "isinstance", "(", "expression", ",", "expressions", ".", "UnaryTransformation", ")", ":", "return", "expression", "raise", "NotImplementedError", "(", "u'UnaryTransformation expression \"{}\" encountered with IR blocks {} is unsupported by '", "u'the SQL backend.'", ".", "format", "(", "expression", ",", "ir_blocks", ")", ")", "new_ir_blocks", "=", "[", "block", ".", "visit_and_update_expressions", "(", "visitor_fn", ")", "for", "block", "in", "ir_blocks", "]", "return", "new_ir_blocks" ]
Raise exception if any unary transformation block encountered.
[ "Raise", "exception", "if", "any", "unary", "transformation", "block", "encountered", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L237-L252
233,141
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_sql/__init__.py
lower_unsupported_metafield_expressions
def lower_unsupported_metafield_expressions(ir_blocks): """Raise exception if an unsupported metafield is encountered in any LocalField expression.""" def visitor_fn(expression): """Visitor function raising exception for any unsupported metafield.""" if not isinstance(expression, expressions.LocalField): return expression if expression.field_name not in constants.UNSUPPORTED_META_FIELDS: return expression raise NotImplementedError( u'Encountered unsupported metafield {} in LocalField {} during construction of ' u'SQL query tree for IR blocks {}.'.format( constants.UNSUPPORTED_META_FIELDS[expression.field_name], expression, ir_blocks)) new_ir_blocks = [ block.visit_and_update_expressions(visitor_fn) for block in ir_blocks ] return new_ir_blocks
python
def lower_unsupported_metafield_expressions(ir_blocks): """Raise exception if an unsupported metafield is encountered in any LocalField expression.""" def visitor_fn(expression): """Visitor function raising exception for any unsupported metafield.""" if not isinstance(expression, expressions.LocalField): return expression if expression.field_name not in constants.UNSUPPORTED_META_FIELDS: return expression raise NotImplementedError( u'Encountered unsupported metafield {} in LocalField {} during construction of ' u'SQL query tree for IR blocks {}.'.format( constants.UNSUPPORTED_META_FIELDS[expression.field_name], expression, ir_blocks)) new_ir_blocks = [ block.visit_and_update_expressions(visitor_fn) for block in ir_blocks ] return new_ir_blocks
[ "def", "lower_unsupported_metafield_expressions", "(", "ir_blocks", ")", ":", "def", "visitor_fn", "(", "expression", ")", ":", "\"\"\"Visitor function raising exception for any unsupported metafield.\"\"\"", "if", "not", "isinstance", "(", "expression", ",", "expressions", ".", "LocalField", ")", ":", "return", "expression", "if", "expression", ".", "field_name", "not", "in", "constants", ".", "UNSUPPORTED_META_FIELDS", ":", "return", "expression", "raise", "NotImplementedError", "(", "u'Encountered unsupported metafield {} in LocalField {} during construction of '", "u'SQL query tree for IR blocks {}.'", ".", "format", "(", "constants", ".", "UNSUPPORTED_META_FIELDS", "[", "expression", ".", "field_name", "]", ",", "expression", ",", "ir_blocks", ")", ")", "new_ir_blocks", "=", "[", "block", ".", "visit_and_update_expressions", "(", "visitor_fn", ")", "for", "block", "in", "ir_blocks", "]", "return", "new_ir_blocks" ]
Raise exception if an unsupported metafield is encountered in any LocalField expression.
[ "Raise", "exception", "if", "an", "unsupported", "metafield", "is", "encountered", "in", "any", "LocalField", "expression", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_sql/__init__.py#L255-L272
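Both lowering passes above follow the same visitor contract: visit_and_update_expressions applies visitor_fn to every expression, and the visitor either returns the (possibly replaced) expression or raises. A minimal sketch of that contract with a stand-in expression class (the class and names are hypothetical):

class ToyExpression(object):
    def __init__(self, name):
        self.name = name

def visitor_fn(expression):
    """Pass supported expressions through; raise for the unsupported one."""
    if expression.name == 'unsupported':
        raise NotImplementedError(u'Expression {} is unsupported.'.format(expression.name))
    return expression

for expression in [ToyExpression('local_field'), ToyExpression('unsupported')]:
    try:
        visitor_fn(expression)
    except NotImplementedError as error:
        print(error)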
233,142
kensho-technologies/graphql-compiler
graphql_compiler/__init__.py
get_graphql_schema_from_orientdb_schema_data
def get_graphql_schema_from_orientdb_schema_data(schema_data, class_to_field_type_overrides=None, hidden_classes=None): """Construct a GraphQL schema from an OrientDB schema. Args: schema_data: list of dicts describing the classes in the OrientDB schema. The following format is the way the data is structured in OrientDB 2. See the README.md file for an example of how to query this data. Each dict has the following string fields: - name: string, the name of the class. - superClasses (optional): list of strings, the name of the class's superclasses. - superClass (optional): string, the name of the class's superclass. May be used instead of superClasses if there is only one superClass. Used for backwards compatibility with OrientDB. - customFields (optional): dict, string -> string, data defined on the class instead of instances of the class. - abstract: bool, true if the class is abstract. - properties: list of dicts, describing the class's properties. Each property dictionary has the following string fields: - name: string, the name of the property. - type: int, builtin OrientDB type ID of the property. See schema_properties.py for the mapping. - linkedType (optional): int, if the property is a collection of builtin OrientDB objects, then it indicates their type ID. - linkedClass (optional): string, if the property is a collection of class instances, then it indicates the name of the class. If class is an edge class, and the field name is either 'in' or 'out', then it describes the name of an endpoint of the edge. - defaultValue: string, the textual representation of the default value for the property, as returned by OrientDB's schema introspection code, e.g., '{}' for the embedded set type. Note that if the property is a collection type, it must have a default value. class_to_field_type_overrides: optional dict, class name -> {field name -> field type}, (string -> {string -> GraphQLType}). Used to override the type of a field in the class where it's first defined and all the class's subclasses. hidden_classes: optional set of strings, classes to not include in the GraphQL schema. Returns: tuple of (GraphQL schema object, GraphQL type equivalence hints dict). The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}). """ if class_to_field_type_overrides is None: class_to_field_type_overrides = dict() if hidden_classes is None: hidden_classes = set() schema_graph = SchemaGraph(schema_data) return get_graphql_schema_from_schema_graph(schema_graph, class_to_field_type_overrides, hidden_classes)
python
def get_graphql_schema_from_orientdb_schema_data(schema_data, class_to_field_type_overrides=None, hidden_classes=None): """Construct a GraphQL schema from an OrientDB schema. Args: schema_data: list of dicts describing the classes in the OrientDB schema. The following format is the way the data is structured in OrientDB 2. See the README.md file for an example of how to query this data. Each dict has the following string fields: - name: string, the name of the class. - superClasses (optional): list of strings, the name of the class's superclasses. - superClass (optional): string, the name of the class's superclass. May be used instead of superClasses if there is only one superClass. Used for backwards compatibility with OrientDB. - customFields (optional): dict, string -> string, data defined on the class instead of instances of the class. - abstract: bool, true if the class is abstract. - properties: list of dicts, describing the class's properties. Each property dictionary has the following string fields: - name: string, the name of the property. - type: int, builtin OrientDB type ID of the property. See schema_properties.py for the mapping. - linkedType (optional): int, if the property is a collection of builtin OrientDB objects, then it indicates their type ID. - linkedClass (optional): string, if the property is a collection of class instances, then it indicates the name of the class. If class is an edge class, and the field name is either 'in' or 'out', then it describes the name of an endpoint of the edge. - defaultValue: string, the textual representation of the default value for the property, as returned by OrientDB's schema introspection code, e.g., '{}' for the embedded set type. Note that if the property is a collection type, it must have a default value. class_to_field_type_overrides: optional dict, class name -> {field name -> field type}, (string -> {string -> GraphQLType}). Used to override the type of a field in the class where it's first defined and all the class's subclasses. hidden_classes: optional set of strings, classes to not include in the GraphQL schema. Returns: tuple of (GraphQL schema object, GraphQL type equivalence hints dict). The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}). """ if class_to_field_type_overrides is None: class_to_field_type_overrides = dict() if hidden_classes is None: hidden_classes = set() schema_graph = SchemaGraph(schema_data) return get_graphql_schema_from_schema_graph(schema_graph, class_to_field_type_overrides, hidden_classes)
[ "def", "get_graphql_schema_from_orientdb_schema_data", "(", "schema_data", ",", "class_to_field_type_overrides", "=", "None", ",", "hidden_classes", "=", "None", ")", ":", "if", "class_to_field_type_overrides", "is", "None", ":", "class_to_field_type_overrides", "=", "dict", "(", ")", "if", "hidden_classes", "is", "None", ":", "hidden_classes", "=", "set", "(", ")", "schema_graph", "=", "SchemaGraph", "(", "schema_data", ")", "return", "get_graphql_schema_from_schema_graph", "(", "schema_graph", ",", "class_to_field_type_overrides", ",", "hidden_classes", ")" ]
Construct a GraphQL schema from an OrientDB schema. Args: schema_data: list of dicts describing the classes in the OrientDB schema. The following format is the way the data is structured in OrientDB 2. See the README.md file for an example of how to query this data. Each dict has the following string fields: - name: string, the name of the class. - superClasses (optional): list of strings, the name of the class's superclasses. - superClass (optional): string, the name of the class's superclass. May be used instead of superClasses if there is only one superClass. Used for backwards compatibility with OrientDB. - customFields (optional): dict, string -> string, data defined on the class instead of instances of the class. - abstract: bool, true if the class is abstract. - properties: list of dicts, describing the class's properties. Each property dictionary has the following string fields: - name: string, the name of the property. - type: int, builtin OrientDB type ID of the property. See schema_properties.py for the mapping. - linkedType (optional): int, if the property is a collection of builtin OrientDB objects, then it indicates their type ID. - linkedClass (optional): string, if the property is a collection of class instances, then it indicates the name of the class. If class is an edge class, and the field name is either 'in' or 'out', then it describes the name of an endpoint of the edge. - defaultValue: string, the textual representation of the default value for the property, as returned by OrientDB's schema introspection code, e.g., '{}' for the embedded set type. Note that if the property is a collection type, it must have a default value. class_to_field_type_overrides: optional dict, class name -> {field name -> field type}, (string -> {string -> GraphQLType}). Used to override the type of a field in the class where it's first defined and all the class's subclasses. hidden_classes: optional set of strings, classes to not include in the GraphQL schema. Returns: tuple of (GraphQL schema object, GraphQL type equivalence hints dict). The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
[ "Construct", "a", "GraphQL", "schema", "from", "an", "OrientDB", "schema", "." ]
f6079c6d10f64932f6b3af309b79bcea2123ca8f
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/__init__.py#L139-L199
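The docstring above spells out the fields of each schema_data entry; a minimal hand-written example of one entry follows. The class and property names are invented, and the type id 7 is assumed to be OrientDB's builtin String type.

schema_data = [
    {
        'name': 'Animal',
        'abstract': False,
        'superClasses': ['V'],
        'properties': [
            {'name': 'name', 'type': 7},  # 7: assumed OrientDB String type id
        ],
    },
]

# Hypothetical call mirroring the documented signature:
# schema, type_equivalence_hints = get_graphql_schema_from_orientdb_schema_data(schema_data)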
233,143
slackapi/python-slack-events-api
slackeventsapi/__init__.py
SlackEventAdapter.start
def start(self, host='127.0.0.1', port=None, debug=False, **kwargs):
    """
    Start the built in webserver, bound to the host and port you'd like.
    Default host is `127.0.0.1` and port 8080.

    :param host: The host you want to bind the built in webserver to
    :param port: The port number you want the webserver to run on
    :param debug: Set to `True` to enable debug level logging
    :param kwargs: Additional arguments you'd like to pass to Flask
    """
    self.server.run(host=host, port=port, debug=debug, **kwargs)
python
def start(self, host='127.0.0.1', port=None, debug=False, **kwargs):
    """
    Start the built in webserver, bound to the host and port you'd like.
    Default host is `127.0.0.1` and port 8080.

    :param host: The host you want to bind the built in webserver to
    :param port: The port number you want the webserver to run on
    :param debug: Set to `True` to enable debug level logging
    :param kwargs: Additional arguments you'd like to pass to Flask
    """
    self.server.run(host=host, port=port, debug=debug, **kwargs)
[ "def", "start", "(", "self", ",", "host", "=", "'127.0.0.1'", ",", "port", "=", "None", ",", "debug", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "server", ".", "run", "(", "host", "=", "host", ",", "port", "=", "port", ",", "debug", "=", "debug", ",", "*", "*", "kwargs", ")" ]
Start the built in webserver, bound to the host and port you'd like. Default host is `127.0.0.1` and port 8080. :param host: The host you want to bind the built in webserver to :param port: The port number you want the webserver to run on :param debug: Set to `True` to enable debug level logging :param kwargs: Additional arguments you'd like to pass to Flask
[ "Start", "the", "built", "in", "webserver", "bound", "to", "the", "host", "and", "port", "you", "d", "like", ".", "Default", "host", "is", "127", ".", "0", ".", "0", ".", "1", "and", "port", "8080", "." ]
1254d83181eb939f124a0e4746dafea7e14047c1
https://github.com/slackapi/python-slack-events-api/blob/1254d83181eb939f124a0e4746dafea7e14047c1/slackeventsapi/__init__.py#L13-L23
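A hedged usage sketch of the adapter around start(). The constructor arguments follow the project's README conventions (a Slack secret/token plus the events endpoint); older releases took a verification token where newer ones take a signing secret, so check the installed version.

import os
from slackeventsapi import SlackEventAdapter

slack_events_adapter = SlackEventAdapter(os.environ['SLACK_SECRET'], '/slack/events')

@slack_events_adapter.on('message')
def handle_message(event_data):
    # Print the inner event payload for every message event received.
    print(event_data['event'])

slack_events_adapter.start(port=3000)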
233,144
apragacz/django-rest-registration
rest_registration/api/views/login.py
login
def login(request):
    '''
    Logs in the user via the given login and password.
    '''
    serializer_class = registration_settings.LOGIN_SERIALIZER_CLASS
    serializer = serializer_class(data=request.data)
    serializer.is_valid(raise_exception=True)
    user = serializer.get_authenticated_user()

    if not user:
        raise BadRequest('Login or password invalid.')

    extra_data = perform_login(request, user)

    return get_ok_response('Login successful', extra_data=extra_data)
python
def login(request):
    '''
    Logs in the user via the given login and password.
    '''
    serializer_class = registration_settings.LOGIN_SERIALIZER_CLASS
    serializer = serializer_class(data=request.data)
    serializer.is_valid(raise_exception=True)
    user = serializer.get_authenticated_user()

    if not user:
        raise BadRequest('Login or password invalid.')

    extra_data = perform_login(request, user)

    return get_ok_response('Login successful', extra_data=extra_data)
[ "def", "login", "(", "request", ")", ":", "serializer_class", "=", "registration_settings", ".", "LOGIN_SERIALIZER_CLASS", "serializer", "=", "serializer_class", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "user", "=", "serializer", ".", "get_authenticated_user", "(", ")", "if", "not", "user", ":", "raise", "BadRequest", "(", "'Login or password invalid.'", ")", "extra_data", "=", "perform_login", "(", "request", ",", "user", ")", "return", "get_ok_response", "(", "'Login successful'", ",", "extra_data", "=", "extra_data", ")" ]
Logs in the user via the given login and password.
[ "Logs", "in", "the", "user", "via", "given", "login", "and", "password", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/login.py#L25-L39
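A hedged client-side sketch of calling the login view with the requests library. The '/accounts/login/' prefix is whatever URL the project mounted the view under, and the 'login'/'password' field names assume the default LOGIN_SERIALIZER_CLASS; both are project-dependent.

import requests

response = requests.post(
    'http://localhost:8000/accounts/login/',
    data={'login': 'alice', 'password': 'secret-pass-123'},
)
# On success: HTTP 200 plus any extra_data returned by perform_login
# (e.g. a token, depending on configuration).
print(response.status_code, response.json())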
233,145
apragacz/django-rest-registration
rest_registration/api/views/login.py
logout
def logout(request):
    '''
    Logs out the user. Returns an error if the user is not authenticated.
    '''
    user = request.user
    serializer = LogoutSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    data = serializer.validated_data

    if should_authenticate_session():
        auth.logout(request)
    if should_retrieve_token() and data['revoke_token']:
        try:
            user.auth_token.delete()
        except Token.DoesNotExist:
            raise BadRequest('Cannot remove non-existent token')

    return get_ok_response('Logout successful')
python
def logout(request):
    '''
    Logs out the user. Returns an error if the user is not authenticated.
    '''
    user = request.user
    serializer = LogoutSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    data = serializer.validated_data

    if should_authenticate_session():
        auth.logout(request)
    if should_retrieve_token() and data['revoke_token']:
        try:
            user.auth_token.delete()
        except Token.DoesNotExist:
            raise BadRequest('Cannot remove non-existent token')

    return get_ok_response('Logout successful')
[ "def", "logout", "(", "request", ")", ":", "user", "=", "request", ".", "user", "serializer", "=", "LogoutSerializer", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "data", "=", "serializer", ".", "validated_data", "if", "should_authenticate_session", "(", ")", ":", "auth", ".", "logout", "(", "request", ")", "if", "should_retrieve_token", "(", ")", "and", "data", "[", "'revoke_token'", "]", ":", "try", ":", "user", ".", "auth_token", ".", "delete", "(", ")", "except", "Token", ".", "DoesNotExist", ":", "raise", "BadRequest", "(", "'Cannot remove non-existent token'", ")", "return", "get_ok_response", "(", "'Logout successful'", ")" ]
Logs out the user. Returns an error if the user is not authenticated.
[ "Logs", "out", "the", "user", ".", "returns", "an", "error", "if", "the", "user", "is", "not", "authenticated", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/login.py#L49-L67
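A matching hedged sketch for the logout view. The URL is hypothetical, the Authorization header assumes DRF token authentication, and 'revoke_token' is the flag validated by LogoutSerializer in the record above.

import requests

response = requests.post(
    'http://localhost:8000/accounts/logout/',
    data={'revoke_token': True},
    headers={'Authorization': 'Token 0123abcd'},
)
print(response.status_code, response.json())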
233,146
apragacz/django-rest-registration
rest_registration/utils/users.py
get_object_or_404
def get_object_or_404(queryset, *filter_args, **filter_kwargs): """ Same as Django's standard shortcut, but make sure to also raise 404 if the filter_kwargs don't match the required types. This function was copied from rest_framework.generics because of issue #36. """ try: return _get_object_or_404(queryset, *filter_args, **filter_kwargs) except (TypeError, ValueError, ValidationError): raise Http404
python
def get_object_or_404(queryset, *filter_args, **filter_kwargs): """ Same as Django's standard shortcut, but make sure to also raise 404 if the filter_kwargs don't match the required types. This function was copied from rest_framework.generics because of issue #36. """ try: return _get_object_or_404(queryset, *filter_args, **filter_kwargs) except (TypeError, ValueError, ValidationError): raise Http404
[ "def", "get_object_or_404", "(", "queryset", ",", "*", "filter_args", ",", "*", "*", "filter_kwargs", ")", ":", "try", ":", "return", "_get_object_or_404", "(", "queryset", ",", "*", "filter_args", ",", "*", "*", "filter_kwargs", ")", "except", "(", "TypeError", ",", "ValueError", ",", "ValidationError", ")", ":", "raise", "Http404" ]
Same as Django's standard shortcut, but make sure to also raise 404 if the filter_kwargs don't match the required types. This function was copied from rest_framework.generics because of issue #36.
[ "Same", "as", "Django", "s", "standard", "shortcut", "but", "make", "sure", "to", "also", "raise", "404", "if", "the", "filter_kwargs", "don", "t", "match", "the", "required", "types", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/utils/users.py#L13-L23
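The wrapper above exists because a lookup with a badly typed value (say, a non-integer string against an integer primary key) raises TypeError/ValueError/ValidationError inside Django rather than DoesNotExist. A sketch of the guarded call site, assuming a Django project with a hypothetical Profile model:

from django.http import HttpResponse
from rest_registration.utils.users import get_object_or_404
from myapp.models import Profile  # hypothetical model

def profile_detail(request, pk):
    # pk may arrive as 'not-a-number'; the except clause above converts the
    # resulting ValueError into Http404 instead of a 500 server error.
    profile = get_object_or_404(Profile.objects.all(), pk=pk)
    return HttpResponse(str(profile.pk))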
233,147
apragacz/django-rest-registration
rest_registration/api/views/profile.py
profile
def profile(request): ''' Get or set user profile. ''' serializer_class = registration_settings.PROFILE_SERIALIZER_CLASS if request.method in ['POST', 'PUT', 'PATCH']: partial = request.method == 'PATCH' serializer = serializer_class( instance=request.user, data=request.data, partial=partial, ) serializer.is_valid(raise_exception=True) serializer.save() else: # request.method == 'GET': serializer = serializer_class(instance=request.user) return Response(serializer.data)
python
def profile(request): ''' Get or set user profile. ''' serializer_class = registration_settings.PROFILE_SERIALIZER_CLASS if request.method in ['POST', 'PUT', 'PATCH']: partial = request.method == 'PATCH' serializer = serializer_class( instance=request.user, data=request.data, partial=partial, ) serializer.is_valid(raise_exception=True) serializer.save() else: # request.method == 'GET': serializer = serializer_class(instance=request.user) return Response(serializer.data)
[ "def", "profile", "(", "request", ")", ":", "serializer_class", "=", "registration_settings", ".", "PROFILE_SERIALIZER_CLASS", "if", "request", ".", "method", "in", "[", "'POST'", ",", "'PUT'", ",", "'PATCH'", "]", ":", "partial", "=", "request", ".", "method", "==", "'PATCH'", "serializer", "=", "serializer_class", "(", "instance", "=", "request", ".", "user", ",", "data", "=", "request", ".", "data", ",", "partial", "=", "partial", ",", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "serializer", ".", "save", "(", ")", "else", ":", "# request.method == 'GET':", "serializer", "=", "serializer_class", "(", "instance", "=", "request", ".", "user", ")", "return", "Response", "(", "serializer", ".", "data", ")" ]
Get or set user profile.
[ "Get", "or", "set", "user", "profile", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/profile.py#L13-L30
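A hedged sketch of the two request styles the profile view accepts: GET to read and PATCH for a partial update. The URL, the token header, and the 'first_name' field all depend on the project's routing, authentication, and PROFILE_SERIALIZER_CLASS.

import requests

headers = {'Authorization': 'Token 0123abcd'}
profile_url = 'http://localhost:8000/accounts/profile/'

print(requests.get(profile_url, headers=headers).json())

# PATCH triggers the partial=True branch in the view above.
response = requests.patch(profile_url, data={'first_name': 'Alice'}, headers=headers)
print(response.status_code, response.json())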
233,148
apragacz/django-rest-registration
rest_registration/api/views/register.py
register
def register(request): ''' Register new user. ''' serializer_class = registration_settings.REGISTER_SERIALIZER_CLASS serializer = serializer_class(data=request.data) serializer.is_valid(raise_exception=True) kwargs = {} if registration_settings.REGISTER_VERIFICATION_ENABLED: verification_flag_field = get_user_setting('VERIFICATION_FLAG_FIELD') kwargs[verification_flag_field] = False email_field = get_user_setting('EMAIL_FIELD') if (email_field not in serializer.validated_data or not serializer.validated_data[email_field]): raise BadRequest("User without email cannot be verified") user = serializer.save(**kwargs) output_serializer_class = registration_settings.REGISTER_OUTPUT_SERIALIZER_CLASS # noqa: E501 output_serializer = output_serializer_class(instance=user) user_data = output_serializer.data if registration_settings.REGISTER_VERIFICATION_ENABLED: signer = RegisterSigner({ 'user_id': user.pk, }, request=request) template_config = ( registration_settings.REGISTER_VERIFICATION_EMAIL_TEMPLATES) send_verification_notification(user, signer, template_config) return Response(user_data, status=status.HTTP_201_CREATED)
python
def register(request): ''' Register new user. ''' serializer_class = registration_settings.REGISTER_SERIALIZER_CLASS serializer = serializer_class(data=request.data) serializer.is_valid(raise_exception=True) kwargs = {} if registration_settings.REGISTER_VERIFICATION_ENABLED: verification_flag_field = get_user_setting('VERIFICATION_FLAG_FIELD') kwargs[verification_flag_field] = False email_field = get_user_setting('EMAIL_FIELD') if (email_field not in serializer.validated_data or not serializer.validated_data[email_field]): raise BadRequest("User without email cannot be verified") user = serializer.save(**kwargs) output_serializer_class = registration_settings.REGISTER_OUTPUT_SERIALIZER_CLASS # noqa: E501 output_serializer = output_serializer_class(instance=user) user_data = output_serializer.data if registration_settings.REGISTER_VERIFICATION_ENABLED: signer = RegisterSigner({ 'user_id': user.pk, }, request=request) template_config = ( registration_settings.REGISTER_VERIFICATION_EMAIL_TEMPLATES) send_verification_notification(user, signer, template_config) return Response(user_data, status=status.HTTP_201_CREATED)
[ "def", "register", "(", "request", ")", ":", "serializer_class", "=", "registration_settings", ".", "REGISTER_SERIALIZER_CLASS", "serializer", "=", "serializer_class", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "kwargs", "=", "{", "}", "if", "registration_settings", ".", "REGISTER_VERIFICATION_ENABLED", ":", "verification_flag_field", "=", "get_user_setting", "(", "'VERIFICATION_FLAG_FIELD'", ")", "kwargs", "[", "verification_flag_field", "]", "=", "False", "email_field", "=", "get_user_setting", "(", "'EMAIL_FIELD'", ")", "if", "(", "email_field", "not", "in", "serializer", ".", "validated_data", "or", "not", "serializer", ".", "validated_data", "[", "email_field", "]", ")", ":", "raise", "BadRequest", "(", "\"User without email cannot be verified\"", ")", "user", "=", "serializer", ".", "save", "(", "*", "*", "kwargs", ")", "output_serializer_class", "=", "registration_settings", ".", "REGISTER_OUTPUT_SERIALIZER_CLASS", "# noqa: E501", "output_serializer", "=", "output_serializer_class", "(", "instance", "=", "user", ")", "user_data", "=", "output_serializer", ".", "data", "if", "registration_settings", ".", "REGISTER_VERIFICATION_ENABLED", ":", "signer", "=", "RegisterSigner", "(", "{", "'user_id'", ":", "user", ".", "pk", ",", "}", ",", "request", "=", "request", ")", "template_config", "=", "(", "registration_settings", ".", "REGISTER_VERIFICATION_EMAIL_TEMPLATES", ")", "send_verification_notification", "(", "user", ",", "signer", ",", "template_config", ")", "return", "Response", "(", "user_data", ",", "status", "=", "status", ".", "HTTP_201_CREATED", ")" ]
Register new user.
[ "Register", "new", "user", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/register.py#L54-L86
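A hedged sketch of registering a user against the view above. The field names assume a default-style REGISTER_SERIALIZER_CLASS with password confirmation; the URL prefix is project-dependent.

import requests

response = requests.post(
    'http://localhost:8000/accounts/register/',
    data={
        'username': 'alice',
        'email': 'alice@example.com',
        'password': 'secret-pass-123',
        'password_confirm': 'secret-pass-123',
    },
)
# Expect HTTP 201 with the REGISTER_OUTPUT_SERIALIZER_CLASS payload on success.
print(response.status_code, response.json())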
233,149
apragacz/django-rest-registration
rest_registration/api/views/register.py
verify_registration
def verify_registration(request): """ Verify registration via signature. """ user = process_verify_registration_data(request.data) extra_data = None if registration_settings.REGISTER_VERIFICATION_AUTO_LOGIN: extra_data = perform_login(request, user) return get_ok_response('User verified successfully', extra_data=extra_data)
python
def verify_registration(request): """ Verify registration via signature. """ user = process_verify_registration_data(request.data) extra_data = None if registration_settings.REGISTER_VERIFICATION_AUTO_LOGIN: extra_data = perform_login(request, user) return get_ok_response('User verified successfully', extra_data=extra_data)
[ "def", "verify_registration", "(", "request", ")", ":", "user", "=", "process_verify_registration_data", "(", "request", ".", "data", ")", "extra_data", "=", "None", "if", "registration_settings", ".", "REGISTER_VERIFICATION_AUTO_LOGIN", ":", "extra_data", "=", "perform_login", "(", "request", ",", "user", ")", "return", "get_ok_response", "(", "'User verified successfully'", ",", "extra_data", "=", "extra_data", ")" ]
Verify registration via signature.
[ "Verify", "registration", "via", "signature", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/register.py#L98-L106
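A hedged sketch of the verification callback. RegisterSigner in the register record above signs {'user_id': ...}; the exact extra keys the signer adds (typically a timestamp and a signature) and the URL are assumptions, and the values below are dummies.

import requests

response = requests.post(
    'http://localhost:8000/accounts/verify-registration/',
    data={'user_id': 1, 'timestamp': 1514764800, 'signature': 'dummy-signature'},
)
print(response.status_code, response.json())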
233,150
apragacz/django-rest-registration
setup.py
get_requirements
def get_requirements(requirements_filepath): ''' Return list of this package requirements via local filepath. ''' requirements = [] with open(os.path.join(ROOT_DIR, requirements_filepath), 'rt') as f: for line in f: if line.startswith('#'): continue line = line.rstrip() if not line: continue requirements.append(line) return requirements
python
def get_requirements(requirements_filepath): ''' Return list of this package requirements via local filepath. ''' requirements = [] with open(os.path.join(ROOT_DIR, requirements_filepath), 'rt') as f: for line in f: if line.startswith('#'): continue line = line.rstrip() if not line: continue requirements.append(line) return requirements
[ "def", "get_requirements", "(", "requirements_filepath", ")", ":", "requirements", "=", "[", "]", "with", "open", "(", "os", ".", "path", ".", "join", "(", "ROOT_DIR", ",", "requirements_filepath", ")", ",", "'rt'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "line", "=", "line", ".", "rstrip", "(", ")", "if", "not", "line", ":", "continue", "requirements", ".", "append", "(", "line", ")", "return", "requirements" ]
Return list of this package requirements via local filepath.
[ "Return", "list", "of", "this", "package", "requirements", "via", "local", "filepath", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/setup.py#L15-L28
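A typical caller of get_requirements is the setup() invocation in the same setup.py; a minimal sketch, with the package name and requirements filename as placeholder assumptions:

from setuptools import setup

setup(
    name="example-package",  # hypothetical
    install_requires=get_requirements("requirements.txt"),  # helper defined above
)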
233,151
apragacz/django-rest-registration
rest_registration/api/views/reset_password.py
send_reset_password_link
def send_reset_password_link(request): ''' Send email with reset password link. ''' if not registration_settings.RESET_PASSWORD_VERIFICATION_ENABLED: raise Http404() serializer = SendResetPasswordLinkSerializer(data=request.data) serializer.is_valid(raise_exception=True) login = serializer.validated_data['login'] user = None for login_field in get_login_fields(): user = get_user_by_lookup_dict( {login_field: login}, default=None, require_verified=False) if user: break if not user: raise UserNotFound() signer = ResetPasswordSigner({ 'user_id': user.pk, }, request=request) template_config = ( registration_settings.RESET_PASSWORD_VERIFICATION_EMAIL_TEMPLATES) send_verification_notification(user, signer, template_config) return get_ok_response('Reset link sent')
python
def send_reset_password_link(request): ''' Send email with reset password link. ''' if not registration_settings.RESET_PASSWORD_VERIFICATION_ENABLED: raise Http404() serializer = SendResetPasswordLinkSerializer(data=request.data) serializer.is_valid(raise_exception=True) login = serializer.validated_data['login'] user = None for login_field in get_login_fields(): user = get_user_by_lookup_dict( {login_field: login}, default=None, require_verified=False) if user: break if not user: raise UserNotFound() signer = ResetPasswordSigner({ 'user_id': user.pk, }, request=request) template_config = ( registration_settings.RESET_PASSWORD_VERIFICATION_EMAIL_TEMPLATES) send_verification_notification(user, signer, template_config) return get_ok_response('Reset link sent')
[ "def", "send_reset_password_link", "(", "request", ")", ":", "if", "not", "registration_settings", ".", "RESET_PASSWORD_VERIFICATION_ENABLED", ":", "raise", "Http404", "(", ")", "serializer", "=", "SendResetPasswordLinkSerializer", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "login", "=", "serializer", ".", "validated_data", "[", "'login'", "]", "user", "=", "None", "for", "login_field", "in", "get_login_fields", "(", ")", ":", "user", "=", "get_user_by_lookup_dict", "(", "{", "login_field", ":", "login", "}", ",", "default", "=", "None", ",", "require_verified", "=", "False", ")", "if", "user", ":", "break", "if", "not", "user", ":", "raise", "UserNotFound", "(", ")", "signer", "=", "ResetPasswordSigner", "(", "{", "'user_id'", ":", "user", ".", "pk", ",", "}", ",", "request", "=", "request", ")", "template_config", "=", "(", "registration_settings", ".", "RESET_PASSWORD_VERIFICATION_EMAIL_TEMPLATES", ")", "send_verification_notification", "(", "user", ",", "signer", ",", "template_config", ")", "return", "get_ok_response", "(", "'Reset link sent'", ")" ]
Send email with reset password link.
[ "Send", "email", "with", "reset", "password", "link", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/reset_password.py#L61-L89
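The send_reset_password_link view consumes a single 'login' field, tried against each configured login field in turn; the route below is an assumption based on the package's default URL names:

import requests

requests.post(
    "http://localhost:8000/accounts/send-reset-password-link/",  # assumed route
    data={"login": "alice"},  # the serializer's only input field
)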
233,152
apragacz/django-rest-registration
rest_registration/api/views/register_email.py
register_email
def register_email(request): ''' Register new email. ''' user = request.user serializer = RegisterEmailSerializer(data=request.data) serializer.is_valid(raise_exception=True) email = serializer.validated_data['email'] template_config = ( registration_settings.REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES) if registration_settings.REGISTER_EMAIL_VERIFICATION_ENABLED: signer = RegisterEmailSigner({ 'user_id': user.pk, 'email': email, }, request=request) send_verification_notification( user, signer, template_config, email=email) else: email_field = get_user_setting('EMAIL_FIELD') setattr(user, email_field, email) user.save() return get_ok_response('Register email link email sent')
python
def register_email(request): ''' Register new email. ''' user = request.user serializer = RegisterEmailSerializer(data=request.data) serializer.is_valid(raise_exception=True) email = serializer.validated_data['email'] template_config = ( registration_settings.REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES) if registration_settings.REGISTER_EMAIL_VERIFICATION_ENABLED: signer = RegisterEmailSigner({ 'user_id': user.pk, 'email': email, }, request=request) send_verification_notification( user, signer, template_config, email=email) else: email_field = get_user_setting('EMAIL_FIELD') setattr(user, email_field, email) user.save() return get_ok_response('Register email link email sent')
[ "def", "register_email", "(", "request", ")", ":", "user", "=", "request", ".", "user", "serializer", "=", "RegisterEmailSerializer", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "email", "=", "serializer", ".", "validated_data", "[", "'email'", "]", "template_config", "=", "(", "registration_settings", ".", "REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES", ")", "if", "registration_settings", ".", "REGISTER_EMAIL_VERIFICATION_ENABLED", ":", "signer", "=", "RegisterEmailSigner", "(", "{", "'user_id'", ":", "user", ".", "pk", ",", "'email'", ":", "email", ",", "}", ",", "request", "=", "request", ")", "send_verification_notification", "(", "user", ",", "signer", ",", "template_config", ",", "email", "=", "email", ")", "else", ":", "email_field", "=", "get_user_setting", "(", "'EMAIL_FIELD'", ")", "setattr", "(", "user", ",", "email_field", ",", "email", ")", "user", ".", "save", "(", ")", "return", "get_ok_response", "(", "'Register email link email sent'", ")" ]
Register new email.
[ "Register", "new", "email", "." ]
7373571264dd567c2a73a97ff4c45b64f113605b
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/register_email.py#L33-L58
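register_email likewise takes one field, 'email'; when verification is enabled it mails a signed link binding user_id and the new address, otherwise it saves the email immediately. A hedged request sketch (route and auth scheme assumed):

import requests

requests.post(
    "http://localhost:8000/accounts/register-email/",  # assumed route
    data={"email": "new-address@example.com"},
    headers={"Authorization": "Token <auth token>"},  # the view acts on request.user
)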
233,153
nschloe/matplotlib2tikz
matplotlib2tikz/axes.py
_is_colorbar_heuristic
def _is_colorbar_heuristic(obj): """Find out if the object is in fact a color bar. """ # TODO come up with something more accurate here # Might help: # TODO Are the colorbars exactly the l.collections.PolyCollection's? try: aspect = float(obj.get_aspect()) except ValueError: # e.g., aspect == 'equal' return False # Assume that something is a colorbar if and only if the ratio is above 5.0 # and there are no ticks on the corresponding axis. This isn't always true, # though: The ratio of a color can be freely adjusted by the aspect # keyword, e.g., # # plt.colorbar(im, aspect=5) # limit_ratio = 5.0 return (aspect >= limit_ratio and len(obj.get_xticks()) == 0) or ( aspect <= 1.0 / limit_ratio and len(obj.get_yticks()) == 0 )
python
def _is_colorbar_heuristic(obj): """Find out if the object is in fact a color bar. """ # TODO come up with something more accurate here # Might help: # TODO Are the colorbars exactly the l.collections.PolyCollection's? try: aspect = float(obj.get_aspect()) except ValueError: # e.g., aspect == 'equal' return False # Assume that something is a colorbar if and only if the ratio is above 5.0 # and there are no ticks on the corresponding axis. This isn't always true, # though: The ratio of a color can be freely adjusted by the aspect # keyword, e.g., # # plt.colorbar(im, aspect=5) # limit_ratio = 5.0 return (aspect >= limit_ratio and len(obj.get_xticks()) == 0) or ( aspect <= 1.0 / limit_ratio and len(obj.get_yticks()) == 0 )
[ "def", "_is_colorbar_heuristic", "(", "obj", ")", ":", "# TODO come up with something more accurate here", "# Might help:", "# TODO Are the colorbars exactly the l.collections.PolyCollection's?", "try", ":", "aspect", "=", "float", "(", "obj", ".", "get_aspect", "(", ")", ")", "except", "ValueError", ":", "# e.g., aspect == 'equal'", "return", "False", "# Assume that something is a colorbar if and only if the ratio is above 5.0", "# and there are no ticks on the corresponding axis. This isn't always true,", "# though: The ratio of a color can be freely adjusted by the aspect", "# keyword, e.g.,", "#", "# plt.colorbar(im, aspect=5)", "#", "limit_ratio", "=", "5.0", "return", "(", "aspect", ">=", "limit_ratio", "and", "len", "(", "obj", ".", "get_xticks", "(", ")", ")", "==", "0", ")", "or", "(", "aspect", "<=", "1.0", "/", "limit_ratio", "and", "len", "(", "obj", ".", "get_yticks", "(", ")", ")", "==", "0", ")" ]
Find out if the object is in fact a color bar.
[ "Find", "out", "if", "the", "object", "is", "in", "fact", "a", "color", "bar", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L582-L605
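The heuristic combines two signals: an extreme aspect ratio (at least 5, or at most 1/5) and an empty tick list on the short axis. A small sketch of why a default vertical colorbar qualifies; the default colorbar aspect of roughly 20 is an assumption about matplotlib's defaults:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
im = ax.imshow([[0, 1], [2, 3]])
cbar = fig.colorbar(im)  # tall, thin vertical axes with no x-ticks

# aspect ~20 >= 5 and zero x-ticks, so the heuristic says "colorbar"
print(float(cbar.ax.get_aspect()), len(cbar.ax.get_xticks()))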
233,154
nschloe/matplotlib2tikz
matplotlib2tikz/axes.py
_mpl_cmap2pgf_cmap
def _mpl_cmap2pgf_cmap(cmap, data): """Converts a color map as given in matplotlib to a color map as represented in PGFPlots. """ if isinstance(cmap, mpl.colors.LinearSegmentedColormap): return _handle_linear_segmented_color_map(cmap, data) assert isinstance( cmap, mpl.colors.ListedColormap ), "Only LinearSegmentedColormap and ListedColormap are supported" return _handle_listed_color_map(cmap, data)
python
def _mpl_cmap2pgf_cmap(cmap, data): """Converts a color map as given in matplotlib to a color map as represented in PGFPlots. """ if isinstance(cmap, mpl.colors.LinearSegmentedColormap): return _handle_linear_segmented_color_map(cmap, data) assert isinstance( cmap, mpl.colors.ListedColormap ), "Only LinearSegmentedColormap and ListedColormap are supported" return _handle_listed_color_map(cmap, data)
[ "def", "_mpl_cmap2pgf_cmap", "(", "cmap", ",", "data", ")", ":", "if", "isinstance", "(", "cmap", ",", "mpl", ".", "colors", ".", "LinearSegmentedColormap", ")", ":", "return", "_handle_linear_segmented_color_map", "(", "cmap", ",", "data", ")", "assert", "isinstance", "(", "cmap", ",", "mpl", ".", "colors", ".", "ListedColormap", ")", ",", "\"Only LinearSegmentedColormap and ListedColormap are supported\"", "return", "_handle_listed_color_map", "(", "cmap", ",", "data", ")" ]
Converts a color map as given in matplotlib to a color map as represented in PGFPlots.
[ "Converts", "a", "color", "map", "as", "given", "in", "matplotlib", "to", "a", "color", "map", "as", "represented", "in", "PGFPlots", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L608-L618
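The isinstance dispatch above leans on matplotlib's two colormap classes; for example, 'viridis' ships as a ListedColormap while 'jet' is a LinearSegmentedColormap:

import matplotlib as mpl
import matplotlib.pyplot as plt

print(isinstance(plt.get_cmap("viridis"), mpl.colors.ListedColormap))       # True
print(isinstance(plt.get_cmap("jet"), mpl.colors.LinearSegmentedColormap))  # True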
233,155
nschloe/matplotlib2tikz
matplotlib2tikz/axes.py
_scale_to_int
def _scale_to_int(X, max_val=None): """ Scales the array X such that it contains only integers. """ if max_val is None: X = X / _gcd_array(X) else: X = X / max(1 / max_val, _gcd_array(X)) return [int(entry) for entry in X]
python
def _scale_to_int(X, max_val=None): """ Scales the array X such that it contains only integers. """ if max_val is None: X = X / _gcd_array(X) else: X = X / max(1 / max_val, _gcd_array(X)) return [int(entry) for entry in X]
[ "def", "_scale_to_int", "(", "X", ",", "max_val", "=", "None", ")", ":", "if", "max_val", "is", "None", ":", "X", "=", "X", "/", "_gcd_array", "(", "X", ")", "else", ":", "X", "=", "X", "/", "max", "(", "1", "/", "max_val", ",", "_gcd_array", "(", "X", ")", ")", "return", "[", "int", "(", "entry", ")", "for", "entry", "in", "X", "]" ]
Scales the array X such that it contains only integers.
[ "Scales", "the", "array", "X", "such", "that", "it", "contains", "only", "integers", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L771-L780
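For instance, X = [0.5, 1.0, 1.5] has an array-wide gcd of 0.5, so dividing through yields [1, 2, 3]; a quick check, importing the private helper for illustration:

import numpy
from matplotlib2tikz.axes import _scale_to_int

print(_scale_to_int(numpy.array([0.5, 1.0, 1.5])))  # [1, 2, 3]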
233,156
nschloe/matplotlib2tikz
matplotlib2tikz/axes.py
_gcd_array
def _gcd_array(X): """ Return the largest real value h such that all elements in x are integer multiples of h. """ greatest_common_divisor = 0.0 for x in X: greatest_common_divisor = _gcd(greatest_common_divisor, x) return greatest_common_divisor
python
def _gcd_array(X): """ Return the largest real value h such that all elements in x are integer multiples of h. """ greatest_common_divisor = 0.0 for x in X: greatest_common_divisor = _gcd(greatest_common_divisor, x) return greatest_common_divisor
[ "def", "_gcd_array", "(", "X", ")", ":", "greatest_common_divisor", "=", "0.0", "for", "x", "in", "X", ":", "greatest_common_divisor", "=", "_gcd", "(", "greatest_common_divisor", ",", "x", ")", "return", "greatest_common_divisor" ]
Return the largest real value h such that all elements in x are integer multiples of h.
[ "Return", "the", "largest", "real", "value", "h", "such", "that", "all", "elements", "in", "x", "are", "integer", "multiples", "of", "h", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L783-L792
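_gcd_array folds a scalar gcd over the array, starting from 0.0 (gcd(0, x) == x). The scalar helper _gcd is not shown in this row; a float Euclidean gcd of the usual shape would look like the sketch below, which is an assumption rather than the module's exact code:

def _gcd(a, b):
    # Euclidean algorithm on floats: swap in the remainder until
    # 'a' is (numerically) zero, then 'b' holds the gcd.
    while a > 1.0e-5:
        a, b = b % a, a
    return b

print(_gcd(0.0, 0.5))  # 0.5
print(_gcd(0.5, 1.5))  # 0.5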
233,157
nschloe/matplotlib2tikz
matplotlib2tikz/files.py
new_filename
def new_filename(data, file_kind, ext): """Returns an available filename. :param file_kind: Name under which numbering is recorded, such as 'img' or 'table'. :type file_kind: str :param ext: Filename extension. :type ext: str :returns: (filename, rel_filepath) where filename is a path in the filesystem and rel_filepath is the path to be used in the tex code. """ nb_key = file_kind + "number" if nb_key not in data.keys(): data[nb_key] = -1 if not data["override externals"]: # Make sure not to overwrite anything. file_exists = True while file_exists: data[nb_key] = data[nb_key] + 1 filename, name = _gen_filename(data, nb_key, ext) file_exists = os.path.isfile(filename) else: data[nb_key] = data[nb_key] + 1 filename, name = _gen_filename(data, nb_key, ext) if data["rel data path"]: rel_filepath = posixpath.join(data["rel data path"], name) else: rel_filepath = name return filename, rel_filepath
python
def new_filename(data, file_kind, ext): """Returns an available filename. :param file_kind: Name under which numbering is recorded, such as 'img' or 'table'. :type file_kind: str :param ext: Filename extension. :type ext: str :returns: (filename, rel_filepath) where filename is a path in the filesystem and rel_filepath is the path to be used in the tex code. """ nb_key = file_kind + "number" if nb_key not in data.keys(): data[nb_key] = -1 if not data["override externals"]: # Make sure not to overwrite anything. file_exists = True while file_exists: data[nb_key] = data[nb_key] + 1 filename, name = _gen_filename(data, nb_key, ext) file_exists = os.path.isfile(filename) else: data[nb_key] = data[nb_key] + 1 filename, name = _gen_filename(data, nb_key, ext) if data["rel data path"]: rel_filepath = posixpath.join(data["rel data path"], name) else: rel_filepath = name return filename, rel_filepath
[ "def", "new_filename", "(", "data", ",", "file_kind", ",", "ext", ")", ":", "nb_key", "=", "file_kind", "+", "\"number\"", "if", "nb_key", "not", "in", "data", ".", "keys", "(", ")", ":", "data", "[", "nb_key", "]", "=", "-", "1", "if", "not", "data", "[", "\"override externals\"", "]", ":", "# Make sure not to overwrite anything.", "file_exists", "=", "True", "while", "file_exists", ":", "data", "[", "nb_key", "]", "=", "data", "[", "nb_key", "]", "+", "1", "filename", ",", "name", "=", "_gen_filename", "(", "data", ",", "nb_key", ",", "ext", ")", "file_exists", "=", "os", ".", "path", ".", "isfile", "(", "filename", ")", "else", ":", "data", "[", "nb_key", "]", "=", "data", "[", "nb_key", "]", "+", "1", "filename", ",", "name", "=", "_gen_filename", "(", "data", ",", "nb_key", ",", "ext", ")", "if", "data", "[", "\"rel data path\"", "]", ":", "rel_filepath", "=", "posixpath", ".", "join", "(", "data", "[", "\"rel data path\"", "]", ",", "name", ")", "else", ":", "rel_filepath", "=", "name", "return", "filename", ",", "rel_filepath" ]
Returns an available filename. :param file_kind: Name under which numbering is recorded, such as 'img' or 'table'. :type file_kind: str :param ext: Filename extension. :type ext: str :returns: (filename, rel_filepath) where filename is a path in the filesystem and rel_filepath is the path to be used in the tex code.
[ "Returns", "an", "available", "filename", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/files.py#L12-L47
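The heart of new_filename is the increment-until-unused loop guarding against overwrites; a self-contained sketch of the same pattern (the name template is an assumption, since _gen_filename is not shown here):

import os

def next_available(prefix, ext):
    # Bump the counter until the candidate filename does not exist yet.
    n = 0
    while os.path.isfile("{}-{:03d}{}".format(prefix, n, ext)):
        n += 1
    return "{}-{:03d}{}".format(prefix, n, ext)

print(next_available("img", ".png"))  # e.g. 'img-000.png' in a clean directory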
233,158
nschloe/matplotlib2tikz
matplotlib2tikz/path.py
mpl_linestyle2pgfplots_linestyle
def mpl_linestyle2pgfplots_linestyle(line_style, line=None): """Translates a line style of matplotlib to the corresponding style in PGFPlots. """ # linestyle is a string or dash tuple. Legal string values are # solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) where onoffseq # is an even length tuple of on and off ink in points. # # solid: [(None, None), (None, None), ..., (None, None)] # dashed: (0, (6.0, 6.0)) # dotted: (0, (1.0, 3.0)) # dashdot: (0, (3.0, 5.0, 1.0, 5.0)) if isinstance(line_style, tuple): if line_style[0] is None: return None if len(line_style[1]) == 2: return "dash pattern=on {}pt off {}pt".format(*line_style[1]) assert len(line_style[1]) == 4 return "dash pattern=on {}pt off {}pt on {}pt off {}pt".format(*line_style[1]) if isinstance(line, mpl.lines.Line2D) and line.is_dashed(): # see matplotlib.lines.Line2D.set_dashes # get defaults default_dashOffset, default_dashSeq = mpl.lines._get_dash_pattern(line_style) # get dash format of line under test dashSeq = line._us_dashSeq dashOffset = line._us_dashOffset lst = list() if dashSeq != default_dashSeq: # generate own dash sequence format_string = " ".join(len(dashSeq) // 2 * ["on {}pt off {}pt"]) lst.append("dash pattern=" + format_string.format(*dashSeq)) if dashOffset != default_dashOffset: lst.append("dash phase={}pt".format(dashOffset)) if len(lst) > 0: return ", ".join(lst) return { "": None, "None": None, "none": None, # happens when using plt.boxplot() "-": "solid", "solid": "solid", ":": "dotted", "--": "dashed", "-.": "dash pattern=on 1pt off 3pt on 3pt off 3pt", }[line_style]
python
def mpl_linestyle2pgfplots_linestyle(line_style, line=None): """Translates a line style of matplotlib to the corresponding style in PGFPlots. """ # linestyle is a string or dash tuple. Legal string values are # solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) where onoffseq # is an even length tuple of on and off ink in points. # # solid: [(None, None), (None, None), ..., (None, None)] # dashed: (0, (6.0, 6.0)) # dotted: (0, (1.0, 3.0)) # dashdot: (0, (3.0, 5.0, 1.0, 5.0)) if isinstance(line_style, tuple): if line_style[0] is None: return None if len(line_style[1]) == 2: return "dash pattern=on {}pt off {}pt".format(*line_style[1]) assert len(line_style[1]) == 4 return "dash pattern=on {}pt off {}pt on {}pt off {}pt".format(*line_style[1]) if isinstance(line, mpl.lines.Line2D) and line.is_dashed(): # see matplotlib.lines.Line2D.set_dashes # get defaults default_dashOffset, default_dashSeq = mpl.lines._get_dash_pattern(line_style) # get dash format of line under test dashSeq = line._us_dashSeq dashOffset = line._us_dashOffset lst = list() if dashSeq != default_dashSeq: # generate own dash sequence format_string = " ".join(len(dashSeq) // 2 * ["on {}pt off {}pt"]) lst.append("dash pattern=" + format_string.format(*dashSeq)) if dashOffset != default_dashOffset: lst.append("dash phase={}pt".format(dashOffset)) if len(lst) > 0: return ", ".join(lst) return { "": None, "None": None, "none": None, # happens when using plt.boxplot() "-": "solid", "solid": "solid", ":": "dotted", "--": "dashed", "-.": "dash pattern=on 1pt off 3pt on 3pt off 3pt", }[line_style]
[ "def", "mpl_linestyle2pgfplots_linestyle", "(", "line_style", ",", "line", "=", "None", ")", ":", "# linestyle is a string or dash tuple. Legal string values are", "# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) where onoffseq", "# is an even length tuple of on and off ink in points.", "#", "# solid: [(None, None), (None, None), ..., (None, None)]", "# dashed: (0, (6.0, 6.0))", "# dotted: (0, (1.0, 3.0))", "# dashdot: (0, (3.0, 5.0, 1.0, 5.0))", "if", "isinstance", "(", "line_style", ",", "tuple", ")", ":", "if", "line_style", "[", "0", "]", "is", "None", ":", "return", "None", "if", "len", "(", "line_style", "[", "1", "]", ")", "==", "2", ":", "return", "\"dash pattern=on {}pt off {}pt\"", ".", "format", "(", "*", "line_style", "[", "1", "]", ")", "assert", "len", "(", "line_style", "[", "1", "]", ")", "==", "4", "return", "\"dash pattern=on {}pt off {}pt on {}pt off {}pt\"", ".", "format", "(", "*", "line_style", "[", "1", "]", ")", "if", "isinstance", "(", "line", ",", "mpl", ".", "lines", ".", "Line2D", ")", "and", "line", ".", "is_dashed", "(", ")", ":", "# see matplotlib.lines.Line2D.set_dashes", "# get defaults", "default_dashOffset", ",", "default_dashSeq", "=", "mpl", ".", "lines", ".", "_get_dash_pattern", "(", "line_style", ")", "# get dash format of line under test", "dashSeq", "=", "line", ".", "_us_dashSeq", "dashOffset", "=", "line", ".", "_us_dashOffset", "lst", "=", "list", "(", ")", "if", "dashSeq", "!=", "default_dashSeq", ":", "# generate own dash sequence", "format_string", "=", "\" \"", ".", "join", "(", "len", "(", "dashSeq", ")", "//", "2", "*", "[", "\"on {}pt off {}pt\"", "]", ")", "lst", ".", "append", "(", "\"dash pattern=\"", "+", "format_string", ".", "format", "(", "*", "dashSeq", ")", ")", "if", "dashOffset", "!=", "default_dashOffset", ":", "lst", ".", "append", "(", "\"dash phase={}pt\"", ".", "format", "(", "dashOffset", ")", ")", "if", "len", "(", "lst", ")", ">", "0", ":", "return", "\", \"", ".", "join", "(", "lst", ")", "return", "{", "\"\"", ":", "None", ",", "\"None\"", ":", "None", ",", "\"none\"", ":", "None", ",", "# happens when using plt.boxplot()", "\"-\"", ":", "\"solid\"", ",", "\"solid\"", ":", "\"solid\"", ",", "\":\"", ":", "\"dotted\"", ",", "\"--\"", ":", "\"dashed\"", ",", "\"-.\"", ":", "\"dash pattern=on 1pt off 3pt on 3pt off 3pt\"", ",", "}", "[", "line_style", "]" ]
Translates a line style of matplotlib to the corresponding style in PGFPlots.
[ "Translates", "a", "line", "style", "of", "matplotlib", "to", "the", "corresponding", "style", "in", "PGFPlots", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/path.py#L296-L349
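A few concrete translations, read straight off the string table and the dash-tuple branch above:

from matplotlib2tikz.path import mpl_linestyle2pgfplots_linestyle

assert mpl_linestyle2pgfplots_linestyle("--") == "dashed"
assert mpl_linestyle2pgfplots_linestyle(":") == "dotted"
assert (mpl_linestyle2pgfplots_linestyle((0, (6.0, 6.0)))
        == "dash pattern=on 6.0pt off 6.0pt")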
233,159
nschloe/matplotlib2tikz
matplotlib2tikz/quadmesh.py
draw_quadmesh
def draw_quadmesh(data, obj): """Returns the PGFPlots code for an graphics environment holding a rendering of the object. """ content = [] # Generate file name for current object filename, rel_filepath = files.new_filename(data, "img", ".png") # Get the dpi for rendering and store the original dpi of the figure dpi = data["dpi"] fig_dpi = obj.figure.get_dpi() obj.figure.set_dpi(dpi) # Render the object and save as png file from matplotlib.backends.backend_agg import RendererAgg cbox = obj.get_clip_box() width = int(round(cbox.extents[2])) height = int(round(cbox.extents[3])) ren = RendererAgg(width, height, dpi) obj.draw(ren) # Generate a image from the render buffer image = Image.frombuffer( "RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1 ) # Crop the image to the actual content (removing the the regions otherwise # used for axes, etc.) # 'image.crop' expects the crop box to specify the left, upper, right, and # lower pixel. 'cbox.extents' gives the left, lower, right, and upper # pixel. box = ( int(round(cbox.extents[0])), 0, int(round(cbox.extents[2])), int(round(cbox.extents[3] - cbox.extents[1])), ) cropped = image.crop(box) cropped.save(filename) # Restore the original dpi of the figure obj.figure.set_dpi(fig_dpi) # write the corresponding information to the TikZ file extent = obj.axes.get_xlim() + obj.axes.get_ylim() # Explicitly use \pgfimage as includegrapics command, as the default # \includegraphics fails unexpectedly in some cases ff = data["float format"] content.append( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ).format(*(extent + (rel_filepath,))) ) return data, content
python
def draw_quadmesh(data, obj): """Returns the PGFPlots code for an graphics environment holding a rendering of the object. """ content = [] # Generate file name for current object filename, rel_filepath = files.new_filename(data, "img", ".png") # Get the dpi for rendering and store the original dpi of the figure dpi = data["dpi"] fig_dpi = obj.figure.get_dpi() obj.figure.set_dpi(dpi) # Render the object and save as png file from matplotlib.backends.backend_agg import RendererAgg cbox = obj.get_clip_box() width = int(round(cbox.extents[2])) height = int(round(cbox.extents[3])) ren = RendererAgg(width, height, dpi) obj.draw(ren) # Generate a image from the render buffer image = Image.frombuffer( "RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1 ) # Crop the image to the actual content (removing the the regions otherwise # used for axes, etc.) # 'image.crop' expects the crop box to specify the left, upper, right, and # lower pixel. 'cbox.extents' gives the left, lower, right, and upper # pixel. box = ( int(round(cbox.extents[0])), 0, int(round(cbox.extents[2])), int(round(cbox.extents[3] - cbox.extents[1])), ) cropped = image.crop(box) cropped.save(filename) # Restore the original dpi of the figure obj.figure.set_dpi(fig_dpi) # write the corresponding information to the TikZ file extent = obj.axes.get_xlim() + obj.axes.get_ylim() # Explicitly use \pgfimage as includegrapics command, as the default # \includegraphics fails unexpectedly in some cases ff = data["float format"] content.append( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ).format(*(extent + (rel_filepath,))) ) return data, content
[ "def", "draw_quadmesh", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "# Generate file name for current object", "filename", ",", "rel_filepath", "=", "files", ".", "new_filename", "(", "data", ",", "\"img\"", ",", "\".png\"", ")", "# Get the dpi for rendering and store the original dpi of the figure", "dpi", "=", "data", "[", "\"dpi\"", "]", "fig_dpi", "=", "obj", ".", "figure", ".", "get_dpi", "(", ")", "obj", ".", "figure", ".", "set_dpi", "(", "dpi", ")", "# Render the object and save as png file", "from", "matplotlib", ".", "backends", ".", "backend_agg", "import", "RendererAgg", "cbox", "=", "obj", ".", "get_clip_box", "(", ")", "width", "=", "int", "(", "round", "(", "cbox", ".", "extents", "[", "2", "]", ")", ")", "height", "=", "int", "(", "round", "(", "cbox", ".", "extents", "[", "3", "]", ")", ")", "ren", "=", "RendererAgg", "(", "width", ",", "height", ",", "dpi", ")", "obj", ".", "draw", "(", "ren", ")", "# Generate a image from the render buffer", "image", "=", "Image", ".", "frombuffer", "(", "\"RGBA\"", ",", "ren", ".", "get_canvas_width_height", "(", ")", ",", "ren", ".", "buffer_rgba", "(", ")", ",", "\"raw\"", ",", "\"RGBA\"", ",", "0", ",", "1", ")", "# Crop the image to the actual content (removing the the regions otherwise", "# used for axes, etc.)", "# 'image.crop' expects the crop box to specify the left, upper, right, and", "# lower pixel. 'cbox.extents' gives the left, lower, right, and upper", "# pixel.", "box", "=", "(", "int", "(", "round", "(", "cbox", ".", "extents", "[", "0", "]", ")", ")", ",", "0", ",", "int", "(", "round", "(", "cbox", ".", "extents", "[", "2", "]", ")", ")", ",", "int", "(", "round", "(", "cbox", ".", "extents", "[", "3", "]", "-", "cbox", ".", "extents", "[", "1", "]", ")", ")", ",", ")", "cropped", "=", "image", ".", "crop", "(", "box", ")", "cropped", ".", "save", "(", "filename", ")", "# Restore the original dpi of the figure", "obj", ".", "figure", ".", "set_dpi", "(", "fig_dpi", ")", "# write the corresponding information to the TikZ file", "extent", "=", "obj", ".", "axes", ".", "get_xlim", "(", ")", "+", "obj", ".", "axes", ".", "get_ylim", "(", ")", "# Explicitly use \\pgfimage as includegrapics command, as the default", "# \\includegraphics fails unexpectedly in some cases", "ff", "=", "data", "[", "\"float format\"", "]", "content", ".", "append", "(", "(", "\"\\\\addplot graphics [includegraphics cmd=\\\\pgfimage,\"", "\"xmin=\"", "+", "ff", "+", "\", xmax=\"", "+", "ff", "+", "\", \"", "\"ymin=\"", "+", "ff", "+", "\", ymax=\"", "+", "ff", "+", "\"] {{{}}};\\n\"", ")", ".", "format", "(", "*", "(", "extent", "+", "(", "rel_filepath", ",", ")", ")", ")", ")", "return", "data", ",", "content" ]
Returns the PGFPlots code for a graphics environment holding a rendering of the object.
[ "Returns", "the", "PGFPlots", "code", "for", "a", "graphics", "environment", "holding", "a", "rendering", "of", "the", "object", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/quadmesh.py#L8-L66
233,160
nschloe/matplotlib2tikz
matplotlib2tikz/color.py
mpl_color2xcolor
def mpl_color2xcolor(data, matplotlib_color): """Translates a matplotlib color specification into a proper LaTeX xcolor. """ # Convert it to RGBA. my_col = numpy.array(mpl.colors.ColorConverter().to_rgba(matplotlib_color)) # If the alpha channel is exactly 0, then the color is really 'none' # regardless of the RGB channels. if my_col[-1] == 0.0: return data, "none", my_col xcol = None # RGB values (as taken from xcolor.dtx): available_colors = { # List white first such that for gray values, the combination # white!<x>!black is preferred over, e.g., gray!<y>!black. Note that # the order of the dictionary is respected from Python 3.6 on. "white": numpy.array([1, 1, 1]), "lightgray": numpy.array([0.75, 0.75, 0.75]), "gray": numpy.array([0.5, 0.5, 0.5]), "darkgray": numpy.array([0.25, 0.25, 0.25]), "black": numpy.array([0, 0, 0]), # "red": numpy.array([1, 0, 0]), "green": numpy.array([0, 1, 0]), "blue": numpy.array([0, 0, 1]), "brown": numpy.array([0.75, 0.5, 0.25]), "lime": numpy.array([0.75, 1, 0]), "orange": numpy.array([1, 0.5, 0]), "pink": numpy.array([1, 0.75, 0.75]), "purple": numpy.array([0.75, 0, 0.25]), "teal": numpy.array([0, 0.5, 0.5]), "violet": numpy.array([0.5, 0, 0.5]), # The colors cyan, magenta, yellow, and olive are also # predefined by xcolor, but their RGB approximation of the # native CMYK values is not very good. Don't use them here. } available_colors.update(data["custom colors"]) # Check if it exactly matches any of the colors already available. # This case is actually treated below (alpha==1), but that loop # may pick up combinations with black before finding the exact # match. Hence, first check all colors. for name, rgb in available_colors.items(): if all(my_col[:3] == rgb): xcol = name return data, xcol, my_col # Check if my_col is a multiple of a predefined color and 'black'. for name, rgb in available_colors.items(): if name == "black": continue if rgb[0] != 0.0: alpha = my_col[0] / rgb[0] elif rgb[1] != 0.0: alpha = my_col[1] / rgb[1] else: assert rgb[2] != 0.0 alpha = my_col[2] / rgb[2] # The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are # already accounted for by checking in available_colors above. if all(my_col[:3] == alpha * rgb) and 0.0 < alpha < 1.0: xcol = name + ("!{}!black".format(alpha * 100)) return data, xcol, my_col # Lookup failed, add it to the custom list. xcol = "color" + str(len(data["custom colors"])) data["custom colors"][xcol] = my_col[:3] return data, xcol, my_col
python
def mpl_color2xcolor(data, matplotlib_color): """Translates a matplotlib color specification into a proper LaTeX xcolor. """ # Convert it to RGBA. my_col = numpy.array(mpl.colors.ColorConverter().to_rgba(matplotlib_color)) # If the alpha channel is exactly 0, then the color is really 'none' # regardless of the RGB channels. if my_col[-1] == 0.0: return data, "none", my_col xcol = None # RGB values (as taken from xcolor.dtx): available_colors = { # List white first such that for gray values, the combination # white!<x>!black is preferred over, e.g., gray!<y>!black. Note that # the order of the dictionary is respected from Python 3.6 on. "white": numpy.array([1, 1, 1]), "lightgray": numpy.array([0.75, 0.75, 0.75]), "gray": numpy.array([0.5, 0.5, 0.5]), "darkgray": numpy.array([0.25, 0.25, 0.25]), "black": numpy.array([0, 0, 0]), # "red": numpy.array([1, 0, 0]), "green": numpy.array([0, 1, 0]), "blue": numpy.array([0, 0, 1]), "brown": numpy.array([0.75, 0.5, 0.25]), "lime": numpy.array([0.75, 1, 0]), "orange": numpy.array([1, 0.5, 0]), "pink": numpy.array([1, 0.75, 0.75]), "purple": numpy.array([0.75, 0, 0.25]), "teal": numpy.array([0, 0.5, 0.5]), "violet": numpy.array([0.5, 0, 0.5]), # The colors cyan, magenta, yellow, and olive are also # predefined by xcolor, but their RGB approximation of the # native CMYK values is not very good. Don't use them here. } available_colors.update(data["custom colors"]) # Check if it exactly matches any of the colors already available. # This case is actually treated below (alpha==1), but that loop # may pick up combinations with black before finding the exact # match. Hence, first check all colors. for name, rgb in available_colors.items(): if all(my_col[:3] == rgb): xcol = name return data, xcol, my_col # Check if my_col is a multiple of a predefined color and 'black'. for name, rgb in available_colors.items(): if name == "black": continue if rgb[0] != 0.0: alpha = my_col[0] / rgb[0] elif rgb[1] != 0.0: alpha = my_col[1] / rgb[1] else: assert rgb[2] != 0.0 alpha = my_col[2] / rgb[2] # The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are # already accounted for by checking in available_colors above. if all(my_col[:3] == alpha * rgb) and 0.0 < alpha < 1.0: xcol = name + ("!{}!black".format(alpha * 100)) return data, xcol, my_col # Lookup failed, add it to the custom list. xcol = "color" + str(len(data["custom colors"])) data["custom colors"][xcol] = my_col[:3] return data, xcol, my_col
[ "def", "mpl_color2xcolor", "(", "data", ",", "matplotlib_color", ")", ":", "# Convert it to RGBA.", "my_col", "=", "numpy", ".", "array", "(", "mpl", ".", "colors", ".", "ColorConverter", "(", ")", ".", "to_rgba", "(", "matplotlib_color", ")", ")", "# If the alpha channel is exactly 0, then the color is really 'none'", "# regardless of the RGB channels.", "if", "my_col", "[", "-", "1", "]", "==", "0.0", ":", "return", "data", ",", "\"none\"", ",", "my_col", "xcol", "=", "None", "# RGB values (as taken from xcolor.dtx):", "available_colors", "=", "{", "# List white first such that for gray values, the combination", "# white!<x>!black is preferred over, e.g., gray!<y>!black. Note that", "# the order of the dictionary is respected from Python 3.6 on.", "\"white\"", ":", "numpy", ".", "array", "(", "[", "1", ",", "1", ",", "1", "]", ")", ",", "\"lightgray\"", ":", "numpy", ".", "array", "(", "[", "0.75", ",", "0.75", ",", "0.75", "]", ")", ",", "\"gray\"", ":", "numpy", ".", "array", "(", "[", "0.5", ",", "0.5", ",", "0.5", "]", ")", ",", "\"darkgray\"", ":", "numpy", ".", "array", "(", "[", "0.25", ",", "0.25", ",", "0.25", "]", ")", ",", "\"black\"", ":", "numpy", ".", "array", "(", "[", "0", ",", "0", ",", "0", "]", ")", ",", "#", "\"red\"", ":", "numpy", ".", "array", "(", "[", "1", ",", "0", ",", "0", "]", ")", ",", "\"green\"", ":", "numpy", ".", "array", "(", "[", "0", ",", "1", ",", "0", "]", ")", ",", "\"blue\"", ":", "numpy", ".", "array", "(", "[", "0", ",", "0", ",", "1", "]", ")", ",", "\"brown\"", ":", "numpy", ".", "array", "(", "[", "0.75", ",", "0.5", ",", "0.25", "]", ")", ",", "\"lime\"", ":", "numpy", ".", "array", "(", "[", "0.75", ",", "1", ",", "0", "]", ")", ",", "\"orange\"", ":", "numpy", ".", "array", "(", "[", "1", ",", "0.5", ",", "0", "]", ")", ",", "\"pink\"", ":", "numpy", ".", "array", "(", "[", "1", ",", "0.75", ",", "0.75", "]", ")", ",", "\"purple\"", ":", "numpy", ".", "array", "(", "[", "0.75", ",", "0", ",", "0.25", "]", ")", ",", "\"teal\"", ":", "numpy", ".", "array", "(", "[", "0", ",", "0.5", ",", "0.5", "]", ")", ",", "\"violet\"", ":", "numpy", ".", "array", "(", "[", "0.5", ",", "0", ",", "0.5", "]", ")", ",", "# The colors cyan, magenta, yellow, and olive are also", "# predefined by xcolor, but their RGB approximation of the", "# native CMYK values is not very good. Don't use them here.", "}", "available_colors", ".", "update", "(", "data", "[", "\"custom colors\"", "]", ")", "# Check if it exactly matches any of the colors already available.", "# This case is actually treated below (alpha==1), but that loop", "# may pick up combinations with black before finding the exact", "# match. 
Hence, first check all colors.", "for", "name", ",", "rgb", "in", "available_colors", ".", "items", "(", ")", ":", "if", "all", "(", "my_col", "[", ":", "3", "]", "==", "rgb", ")", ":", "xcol", "=", "name", "return", "data", ",", "xcol", ",", "my_col", "# Check if my_col is a multiple of a predefined color and 'black'.", "for", "name", ",", "rgb", "in", "available_colors", ".", "items", "(", ")", ":", "if", "name", "==", "\"black\"", ":", "continue", "if", "rgb", "[", "0", "]", "!=", "0.0", ":", "alpha", "=", "my_col", "[", "0", "]", "/", "rgb", "[", "0", "]", "elif", "rgb", "[", "1", "]", "!=", "0.0", ":", "alpha", "=", "my_col", "[", "1", "]", "/", "rgb", "[", "1", "]", "else", ":", "assert", "rgb", "[", "2", "]", "!=", "0.0", "alpha", "=", "my_col", "[", "2", "]", "/", "rgb", "[", "2", "]", "# The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are", "# already accounted for by checking in available_colors above.", "if", "all", "(", "my_col", "[", ":", "3", "]", "==", "alpha", "*", "rgb", ")", "and", "0.0", "<", "alpha", "<", "1.0", ":", "xcol", "=", "name", "+", "(", "\"!{}!black\"", ".", "format", "(", "alpha", "*", "100", ")", ")", "return", "data", ",", "xcol", ",", "my_col", "# Lookup failed, add it to the custom list.", "xcol", "=", "\"color\"", "+", "str", "(", "len", "(", "data", "[", "\"custom colors\"", "]", ")", ")", "data", "[", "\"custom colors\"", "]", "[", "xcol", "]", "=", "my_col", "[", ":", "3", "]", "return", "data", ",", "xcol", ",", "my_col" ]
Translates a matplotlib color specification into a proper LaTeX xcolor.
[ "Translates", "a", "matplotlib", "color", "specification", "into", "a", "proper", "LaTeX", "xcolor", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/color.py#L9-L81
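Worked examples covering the three branches (exact name match, scaling towards black, and registration of a new custom color):

from matplotlib2tikz.color import mpl_color2xcolor

data = {"custom colors": {}}
print(mpl_color2xcolor(data, "red")[1])            # 'red' (exact match)
print(mpl_color2xcolor(data, (0.5, 0.0, 0.0))[1])  # 'red!50.0!black' (alpha = 0.5)
print(mpl_color2xcolor(data, (0.1, 0.2, 0.3))[1])  # 'color0' (added to custom colors)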
233,161
nschloe/matplotlib2tikz
matplotlib2tikz/patch.py
draw_patch
def draw_patch(data, obj): """Return the PGFPlots code for patches. """ # Gather the draw options. data, draw_options = mypath.get_draw_options( data, obj, obj.get_edgecolor(), obj.get_facecolor(), obj.get_linestyle(), obj.get_linewidth(), ) if isinstance(obj, mpl.patches.Rectangle): # rectangle specialization return _draw_rectangle(data, obj, draw_options) elif isinstance(obj, mpl.patches.Ellipse): # ellipse specialization return _draw_ellipse(data, obj, draw_options) # regular patch data, path_command, _, _ = mypath.draw_path( data, obj.get_path(), draw_options=draw_options ) return data, path_command
python
def draw_patch(data, obj): """Return the PGFPlots code for patches. """ # Gather the draw options. data, draw_options = mypath.get_draw_options( data, obj, obj.get_edgecolor(), obj.get_facecolor(), obj.get_linestyle(), obj.get_linewidth(), ) if isinstance(obj, mpl.patches.Rectangle): # rectangle specialization return _draw_rectangle(data, obj, draw_options) elif isinstance(obj, mpl.patches.Ellipse): # ellipse specialization return _draw_ellipse(data, obj, draw_options) # regular patch data, path_command, _, _ = mypath.draw_path( data, obj.get_path(), draw_options=draw_options ) return data, path_command
[ "def", "draw_patch", "(", "data", ",", "obj", ")", ":", "# Gather the draw options.", "data", ",", "draw_options", "=", "mypath", ".", "get_draw_options", "(", "data", ",", "obj", ",", "obj", ".", "get_edgecolor", "(", ")", ",", "obj", ".", "get_facecolor", "(", ")", ",", "obj", ".", "get_linestyle", "(", ")", ",", "obj", ".", "get_linewidth", "(", ")", ",", ")", "if", "isinstance", "(", "obj", ",", "mpl", ".", "patches", ".", "Rectangle", ")", ":", "# rectangle specialization", "return", "_draw_rectangle", "(", "data", ",", "obj", ",", "draw_options", ")", "elif", "isinstance", "(", "obj", ",", "mpl", ".", "patches", ".", "Ellipse", ")", ":", "# ellipse specialization", "return", "_draw_ellipse", "(", "data", ",", "obj", ",", "draw_options", ")", "# regular patch", "data", ",", "path_command", ",", "_", ",", "_", "=", "mypath", ".", "draw_path", "(", "data", ",", "obj", ".", "get_path", "(", ")", ",", "draw_options", "=", "draw_options", ")", "return", "data", ",", "path_command" ]
Return the PGFPlots code for patches.
[ "Return", "the", "PGFPlots", "code", "for", "patches", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L8-L32
233,162
nschloe/matplotlib2tikz
matplotlib2tikz/patch.py
_draw_rectangle
def _draw_rectangle(data, obj, draw_options): """Return the PGFPlots code for rectangles. """ # Objects with labels are plot objects (from bar charts, etc). Even those without # labels explicitly set have a label of "_nolegend_". Everything else should be # skipped because they likely correspong to axis/legend objects which are handled by # PGFPlots label = obj.get_label() if label == "": return data, [] # Get actual label, bar charts by default only give rectangles labels of # "_nolegend_". See <https://stackoverflow.com/q/35881290/353337>. handles, labels = obj.axes.get_legend_handles_labels() labelsFound = [ label for h, label in zip(handles, labels) if obj in h.get_children() ] if len(labelsFound) == 1: label = labelsFound[0] left_lower_x = obj.get_x() left_lower_y = obj.get_y() ff = data["float format"] cont = ( "\\draw[{}] (axis cs:" + ff + "," + ff + ") " "rectangle (axis cs:" + ff + "," + ff + ");\n" ).format( ",".join(draw_options), left_lower_x, left_lower_y, left_lower_x + obj.get_width(), left_lower_y + obj.get_height(), ) if label != "_nolegend_" and label not in data["rectangle_legends"]: data["rectangle_legends"].add(label) cont += "\\addlegendimage{{ybar,ybar legend,{}}};\n".format( ",".join(draw_options) ) cont += "\\addlegendentry{{{}}}\n\n".format(label) return data, cont
python
def _draw_rectangle(data, obj, draw_options): """Return the PGFPlots code for rectangles. """ # Objects with labels are plot objects (from bar charts, etc). Even those without # labels explicitly set have a label of "_nolegend_". Everything else should be # skipped because they likely correspong to axis/legend objects which are handled by # PGFPlots label = obj.get_label() if label == "": return data, [] # Get actual label, bar charts by default only give rectangles labels of # "_nolegend_". See <https://stackoverflow.com/q/35881290/353337>. handles, labels = obj.axes.get_legend_handles_labels() labelsFound = [ label for h, label in zip(handles, labels) if obj in h.get_children() ] if len(labelsFound) == 1: label = labelsFound[0] left_lower_x = obj.get_x() left_lower_y = obj.get_y() ff = data["float format"] cont = ( "\\draw[{}] (axis cs:" + ff + "," + ff + ") " "rectangle (axis cs:" + ff + "," + ff + ");\n" ).format( ",".join(draw_options), left_lower_x, left_lower_y, left_lower_x + obj.get_width(), left_lower_y + obj.get_height(), ) if label != "_nolegend_" and label not in data["rectangle_legends"]: data["rectangle_legends"].add(label) cont += "\\addlegendimage{{ybar,ybar legend,{}}};\n".format( ",".join(draw_options) ) cont += "\\addlegendentry{{{}}}\n\n".format(label) return data, cont
[ "def", "_draw_rectangle", "(", "data", ",", "obj", ",", "draw_options", ")", ":", "# Objects with labels are plot objects (from bar charts, etc). Even those without", "# labels explicitly set have a label of \"_nolegend_\". Everything else should be", "# skipped because they likely correspong to axis/legend objects which are handled by", "# PGFPlots", "label", "=", "obj", ".", "get_label", "(", ")", "if", "label", "==", "\"\"", ":", "return", "data", ",", "[", "]", "# Get actual label, bar charts by default only give rectangles labels of", "# \"_nolegend_\". See <https://stackoverflow.com/q/35881290/353337>.", "handles", ",", "labels", "=", "obj", ".", "axes", ".", "get_legend_handles_labels", "(", ")", "labelsFound", "=", "[", "label", "for", "h", ",", "label", "in", "zip", "(", "handles", ",", "labels", ")", "if", "obj", "in", "h", ".", "get_children", "(", ")", "]", "if", "len", "(", "labelsFound", ")", "==", "1", ":", "label", "=", "labelsFound", "[", "0", "]", "left_lower_x", "=", "obj", ".", "get_x", "(", ")", "left_lower_y", "=", "obj", ".", "get_y", "(", ")", "ff", "=", "data", "[", "\"float format\"", "]", "cont", "=", "(", "\"\\\\draw[{}] (axis cs:\"", "+", "ff", "+", "\",\"", "+", "ff", "+", "\") \"", "\"rectangle (axis cs:\"", "+", "ff", "+", "\",\"", "+", "ff", "+", "\");\\n\"", ")", ".", "format", "(", "\",\"", ".", "join", "(", "draw_options", ")", ",", "left_lower_x", ",", "left_lower_y", ",", "left_lower_x", "+", "obj", ".", "get_width", "(", ")", ",", "left_lower_y", "+", "obj", ".", "get_height", "(", ")", ",", ")", "if", "label", "!=", "\"_nolegend_\"", "and", "label", "not", "in", "data", "[", "\"rectangle_legends\"", "]", ":", "data", "[", "\"rectangle_legends\"", "]", ".", "add", "(", "label", ")", "cont", "+=", "\"\\\\addlegendimage{{ybar,ybar legend,{}}};\\n\"", ".", "format", "(", "\",\"", ".", "join", "(", "draw_options", ")", ")", "cont", "+=", "\"\\\\addlegendentry{{{}}}\\n\\n\"", ".", "format", "(", "label", ")", "return", "data", ",", "cont" ]
Return the PGFPlots code for rectangles.
[ "Return", "the", "PGFPlots", "code", "for", "rectangles", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L91-L131
233,163
nschloe/matplotlib2tikz
matplotlib2tikz/patch.py
_draw_ellipse
def _draw_ellipse(data, obj, draw_options): """Return the PGFPlots code for ellipses. """ if isinstance(obj, mpl.patches.Circle): # circle specialization return _draw_circle(data, obj, draw_options) x, y = obj.center ff = data["float format"] if obj.angle != 0: fmt = "rotate around={{" + ff + ":(axis cs:" + ff + "," + ff + ")}}" draw_options.append(fmt.format(obj.angle, x, y)) cont = ( "\\draw[{}] (axis cs:" + ff + "," + ff + ") ellipse (" + ff + " and " + ff + ");\n" ).format(",".join(draw_options), x, y, 0.5 * obj.width, 0.5 * obj.height) return data, cont
python
def _draw_ellipse(data, obj, draw_options): """Return the PGFPlots code for ellipses. """ if isinstance(obj, mpl.patches.Circle): # circle specialization return _draw_circle(data, obj, draw_options) x, y = obj.center ff = data["float format"] if obj.angle != 0: fmt = "rotate around={{" + ff + ":(axis cs:" + ff + "," + ff + ")}}" draw_options.append(fmt.format(obj.angle, x, y)) cont = ( "\\draw[{}] (axis cs:" + ff + "," + ff + ") ellipse (" + ff + " and " + ff + ");\n" ).format(",".join(draw_options), x, y, 0.5 * obj.width, 0.5 * obj.height) return data, cont
[ "def", "_draw_ellipse", "(", "data", ",", "obj", ",", "draw_options", ")", ":", "if", "isinstance", "(", "obj", ",", "mpl", ".", "patches", ".", "Circle", ")", ":", "# circle specialization", "return", "_draw_circle", "(", "data", ",", "obj", ",", "draw_options", ")", "x", ",", "y", "=", "obj", ".", "center", "ff", "=", "data", "[", "\"float format\"", "]", "if", "obj", ".", "angle", "!=", "0", ":", "fmt", "=", "\"rotate around={{\"", "+", "ff", "+", "\":(axis cs:\"", "+", "ff", "+", "\",\"", "+", "ff", "+", "\")}}\"", "draw_options", ".", "append", "(", "fmt", ".", "format", "(", "obj", ".", "angle", ",", "x", ",", "y", ")", ")", "cont", "=", "(", "\"\\\\draw[{}] (axis cs:\"", "+", "ff", "+", "\",\"", "+", "ff", "+", "\") ellipse (\"", "+", "ff", "+", "\" and \"", "+", "ff", "+", "\");\\n\"", ")", ".", "format", "(", "\",\"", ".", "join", "(", "draw_options", ")", ",", "x", ",", "y", ",", "0.5", "*", "obj", ".", "width", ",", "0.5", "*", "obj", ".", "height", ")", "return", "data", ",", "cont" ]
Return the PGFPlots code for ellipses.
[ "Return", "the", "PGFPlots", "code", "for", "ellipses", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L134-L158
233,164
nschloe/matplotlib2tikz
matplotlib2tikz/patch.py
_draw_circle
def _draw_circle(data, obj, draw_options): """Return the PGFPlots code for circles. """ x, y = obj.center ff = data["float format"] cont = ("\\draw[{}] (axis cs:" + ff + "," + ff + ") circle (" + ff + ");\n").format( ",".join(draw_options), x, y, obj.get_radius() ) return data, cont
python
def _draw_circle(data, obj, draw_options): """Return the PGFPlots code for circles. """ x, y = obj.center ff = data["float format"] cont = ("\\draw[{}] (axis cs:" + ff + "," + ff + ") circle (" + ff + ");\n").format( ",".join(draw_options), x, y, obj.get_radius() ) return data, cont
[ "def", "_draw_circle", "(", "data", ",", "obj", ",", "draw_options", ")", ":", "x", ",", "y", "=", "obj", ".", "center", "ff", "=", "data", "[", "\"float format\"", "]", "cont", "=", "(", "\"\\\\draw[{}] (axis cs:\"", "+", "ff", "+", "\",\"", "+", "ff", "+", "\") circle (\"", "+", "ff", "+", "\");\\n\"", ")", ".", "format", "(", "\",\"", ".", "join", "(", "draw_options", ")", ",", "x", ",", "y", ",", "obj", ".", "get_radius", "(", ")", ")", "return", "data", ",", "cont" ]
Return the PGFPlots code for circles.
[ "Return", "the", "PGFPlots", "code", "for", "circles", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L161-L169
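End to end, a circle becomes a single TikZ \draw command; a sketch with an assumed value for the data dict's 'float format' key (the real value is configured elsewhere in the package):

import matplotlib.patches
from matplotlib2tikz.patch import _draw_circle

circle = matplotlib.patches.Circle((1.0, 2.0), 3.0)
data = {"float format": "{:.3f}"}  # assumed format string
_, cont = _draw_circle(data, circle, ["red"])
print(cont)  # \draw[red] (axis cs:1.000,2.000) circle (3.000);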
233,165
nschloe/matplotlib2tikz
matplotlib2tikz/image.py
draw_image
def draw_image(data, obj): """Returns the PGFPlots code for an image environment. """ content = [] filename, rel_filepath = files.new_filename(data, "img", ".png") # store the image as in a file img_array = obj.get_array() dims = img_array.shape if len(dims) == 2: # the values are given as one real number: look at cmap clims = obj.get_clim() mpl.pyplot.imsave( fname=filename, arr=img_array, cmap=obj.get_cmap(), vmin=clims[0], vmax=clims[1], origin=obj.origin, ) else: # RGB (+alpha) information at each point assert len(dims) == 3 and dims[2] in [3, 4] # convert to PIL image if obj.origin == "lower": img_array = numpy.flipud(img_array) # Convert mpl image to PIL image = PIL.Image.fromarray(numpy.uint8(img_array * 255)) # If the input image is PIL: # image = PIL.Image.fromarray(img_array) image.save(filename, origin=obj.origin) # write the corresponding information to the TikZ file extent = obj.get_extent() # the format specification will only accept tuples if not isinstance(extent, tuple): extent = tuple(extent) # Explicitly use \pgfimage as includegrapics command, as the default # \includegraphics fails unexpectedly in some cases ff = data["float format"] content.append( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ).format(*(extent + (rel_filepath,))) ) return data, content
python
def draw_image(data, obj): """Returns the PGFPlots code for an image environment. """ content = [] filename, rel_filepath = files.new_filename(data, "img", ".png") # store the image as in a file img_array = obj.get_array() dims = img_array.shape if len(dims) == 2: # the values are given as one real number: look at cmap clims = obj.get_clim() mpl.pyplot.imsave( fname=filename, arr=img_array, cmap=obj.get_cmap(), vmin=clims[0], vmax=clims[1], origin=obj.origin, ) else: # RGB (+alpha) information at each point assert len(dims) == 3 and dims[2] in [3, 4] # convert to PIL image if obj.origin == "lower": img_array = numpy.flipud(img_array) # Convert mpl image to PIL image = PIL.Image.fromarray(numpy.uint8(img_array * 255)) # If the input image is PIL: # image = PIL.Image.fromarray(img_array) image.save(filename, origin=obj.origin) # write the corresponding information to the TikZ file extent = obj.get_extent() # the format specification will only accept tuples if not isinstance(extent, tuple): extent = tuple(extent) # Explicitly use \pgfimage as includegrapics command, as the default # \includegraphics fails unexpectedly in some cases ff = data["float format"] content.append( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ).format(*(extent + (rel_filepath,))) ) return data, content
[ "def", "draw_image", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "filename", ",", "rel_filepath", "=", "files", ".", "new_filename", "(", "data", ",", "\"img\"", ",", "\".png\"", ")", "# store the image as in a file", "img_array", "=", "obj", ".", "get_array", "(", ")", "dims", "=", "img_array", ".", "shape", "if", "len", "(", "dims", ")", "==", "2", ":", "# the values are given as one real number: look at cmap", "clims", "=", "obj", ".", "get_clim", "(", ")", "mpl", ".", "pyplot", ".", "imsave", "(", "fname", "=", "filename", ",", "arr", "=", "img_array", ",", "cmap", "=", "obj", ".", "get_cmap", "(", ")", ",", "vmin", "=", "clims", "[", "0", "]", ",", "vmax", "=", "clims", "[", "1", "]", ",", "origin", "=", "obj", ".", "origin", ",", ")", "else", ":", "# RGB (+alpha) information at each point", "assert", "len", "(", "dims", ")", "==", "3", "and", "dims", "[", "2", "]", "in", "[", "3", ",", "4", "]", "# convert to PIL image", "if", "obj", ".", "origin", "==", "\"lower\"", ":", "img_array", "=", "numpy", ".", "flipud", "(", "img_array", ")", "# Convert mpl image to PIL", "image", "=", "PIL", ".", "Image", ".", "fromarray", "(", "numpy", ".", "uint8", "(", "img_array", "*", "255", ")", ")", "# If the input image is PIL:", "# image = PIL.Image.fromarray(img_array)", "image", ".", "save", "(", "filename", ",", "origin", "=", "obj", ".", "origin", ")", "# write the corresponding information to the TikZ file", "extent", "=", "obj", ".", "get_extent", "(", ")", "# the format specification will only accept tuples", "if", "not", "isinstance", "(", "extent", ",", "tuple", ")", ":", "extent", "=", "tuple", "(", "extent", ")", "# Explicitly use \\pgfimage as includegrapics command, as the default", "# \\includegraphics fails unexpectedly in some cases", "ff", "=", "data", "[", "\"float format\"", "]", "content", ".", "append", "(", "(", "\"\\\\addplot graphics [includegraphics cmd=\\\\pgfimage,\"", "\"xmin=\"", "+", "ff", "+", "\", xmax=\"", "+", "ff", "+", "\", \"", "\"ymin=\"", "+", "ff", "+", "\", ymax=\"", "+", "ff", "+", "\"] {{{}}};\\n\"", ")", ".", "format", "(", "*", "(", "extent", "+", "(", "rel_filepath", ",", ")", ")", ")", ")", "return", "data", ",", "content" ]
Returns the PGFPlots code for an image environment.
[ "Returns", "the", "PGFPlots", "code", "for", "an", "image", "environment", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/image.py#L10-L64
233,166
nschloe/matplotlib2tikz
matplotlib2tikz/util.py
get_legend_text
def get_legend_text(obj): """Check if line is in legend. """ leg = obj.axes.get_legend() if leg is None: return None keys = [l.get_label() for l in leg.legendHandles if l is not None] values = [l.get_text() for l in leg.texts] label = obj.get_label() d = dict(zip(keys, values)) if label in d: return d[label] return None
python
def get_legend_text(obj): """Check if line is in legend. """ leg = obj.axes.get_legend() if leg is None: return None keys = [l.get_label() for l in leg.legendHandles if l is not None] values = [l.get_text() for l in leg.texts] label = obj.get_label() d = dict(zip(keys, values)) if label in d: return d[label] return None
[ "def", "get_legend_text", "(", "obj", ")", ":", "leg", "=", "obj", ".", "axes", ".", "get_legend", "(", ")", "if", "leg", "is", "None", ":", "return", "None", "keys", "=", "[", "l", ".", "get_label", "(", ")", "for", "l", "in", "leg", ".", "legendHandles", "if", "l", "is", "not", "None", "]", "values", "=", "[", "l", ".", "get_text", "(", ")", "for", "l", "in", "leg", ".", "texts", "]", "label", "=", "obj", ".", "get_label", "(", ")", "d", "=", "dict", "(", "zip", "(", "keys", ",", "values", ")", ")", "if", "label", "in", "d", ":", "return", "d", "[", "label", "]", "return", "None" ]
Check if line is in legend.
[ "Check", "if", "line", "is", "in", "legend", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/util.py#L11-L26
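A hedged usage sketch for this lookup; the import path simply mirrors the record's module location (matplotlib2tikz/util.py) and the labels are illustrative:

```python
import matplotlib.pyplot as plt
from matplotlib2tikz.util import get_legend_text  # path assumed from this record

fig, ax = plt.subplots()
(shown,) = ax.plot([0, 1], [0, 1], label="rising")
(hidden,) = ax.plot([0, 1], [1, 0], label="_nolegend_")
ax.legend()

print(get_legend_text(shown))   # -> 'rising' (label found among legend handles)
print(get_legend_text(hidden))  # -> None (this line was kept out of the legend)
```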
233,167
nschloe/matplotlib2tikz
matplotlib2tikz/save.py
_get_color_definitions
def _get_color_definitions(data): """Returns the list of custom color definitions for the TikZ file. """ definitions = [] fmt = "\\definecolor{{{}}}{{rgb}}{{" + ",".join(3 * [data["float format"]]) + "}}" for name, rgb in data["custom colors"].items(): definitions.append(fmt.format(name, rgb[0], rgb[1], rgb[2])) return definitions
python
def _get_color_definitions(data): """Returns the list of custom color definitions for the TikZ file. """ definitions = [] fmt = "\\definecolor{{{}}}{{rgb}}{{" + ",".join(3 * [data["float format"]]) + "}}" for name, rgb in data["custom colors"].items(): definitions.append(fmt.format(name, rgb[0], rgb[1], rgb[2])) return definitions
[ "def", "_get_color_definitions", "(", "data", ")", ":", "definitions", "=", "[", "]", "fmt", "=", "\"\\\\definecolor{{{}}}{{rgb}}{{\"", "+", "\",\"", ".", "join", "(", "3", "*", "[", "data", "[", "\"float format\"", "]", "]", ")", "+", "\"}}\"", "for", "name", ",", "rgb", "in", "data", "[", "\"custom colors\"", "]", ".", "items", "(", ")", ":", "definitions", ".", "append", "(", "fmt", ".", "format", "(", "name", ",", "rgb", "[", "0", "]", ",", "rgb", "[", "1", "]", ",", "rgb", "[", "2", "]", ")", ")", "return", "definitions" ]
Returns the list of custom color definitions for the TikZ file.
[ "Returns", "the", "list", "of", "custom", "color", "definitions", "for", "the", "TikZ", "file", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L283-L290
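A standalone re-run of the same formatting logic with a hand-built `data` dict; the dict shape is inferred from the two keys the helper reads and is an assumption beyond that:

```python
# Hand-built stand-in for the internal `data` dict (shape inferred from
# the keys read above).
data = {
    "float format": "{:.15g}",
    "custom colors": {"color0": (0.12, 0.47, 0.71)},
}

fmt = "\\definecolor{{{}}}{{rgb}}{{" + ",".join(3 * [data["float format"]]) + "}}"
for name, rgb in data["custom colors"].items():
    print(fmt.format(name, *rgb))
# -> \definecolor{color0}{rgb}{0.12,0.47,0.71}
```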
233,168
nschloe/matplotlib2tikz
matplotlib2tikz/save.py
_print_pgfplot_libs_message
def _print_pgfplot_libs_message(data): """Prints message to screen indicating the use of PGFPlots and its libraries.""" pgfplotslibs = ",".join(list(data["pgfplots libs"])) tikzlibs = ",".join(list(data["tikz libs"])) print(70 * "=") print("Please add the following lines to your LaTeX preamble:\n") print("\\usepackage[utf8]{inputenc}") print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX") print("\\usepackage{pgfplots}") if tikzlibs: print("\\usetikzlibrary{" + tikzlibs + "}") if pgfplotslibs: print("\\usepgfplotslibrary{" + pgfplotslibs + "}") print(70 * "=") return
python
def _print_pgfplot_libs_message(data): """Prints message to screen indicating the use of PGFPlots and its libraries.""" pgfplotslibs = ",".join(list(data["pgfplots libs"])) tikzlibs = ",".join(list(data["tikz libs"])) print(70 * "=") print("Please add the following lines to your LaTeX preamble:\n") print("\\usepackage[utf8]{inputenc}") print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX") print("\\usepackage{pgfplots}") if tikzlibs: print("\\usetikzlibrary{" + tikzlibs + "}") if pgfplotslibs: print("\\usepgfplotslibrary{" + pgfplotslibs + "}") print(70 * "=") return
[ "def", "_print_pgfplot_libs_message", "(", "data", ")", ":", "pgfplotslibs", "=", "\",\"", ".", "join", "(", "list", "(", "data", "[", "\"pgfplots libs\"", "]", ")", ")", "tikzlibs", "=", "\",\"", ".", "join", "(", "list", "(", "data", "[", "\"tikz libs\"", "]", ")", ")", "print", "(", "70", "*", "\"=\"", ")", "print", "(", "\"Please add the following lines to your LaTeX preamble:\\n\"", ")", "print", "(", "\"\\\\usepackage[utf8]{inputenc}\"", ")", "print", "(", "\"\\\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX\"", ")", "print", "(", "\"\\\\usepackage{pgfplots}\"", ")", "if", "tikzlibs", ":", "print", "(", "\"\\\\usetikzlibrary{\"", "+", "tikzlibs", "+", "\"}\"", ")", "if", "pgfplotslibs", ":", "print", "(", "\"\\\\usepgfplotslibrary{\"", "+", "pgfplotslibs", "+", "\"}\"", ")", "print", "(", "70", "*", "\"=\"", ")", "return" ]
Prints message to screen indicating the use of PGFPlots and its libraries.
[ "Prints", "message", "to", "screen", "indicating", "the", "use", "of", "PGFPlots", "and", "its", "libraries", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L293-L309
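Called with a minimal stand-in `data` dict (shape inferred from the keys read above; the underscore prefix marks the helper as private, so this is illustration only), it prints a copy-paste preamble:

```python
# Import path assumed from this record (matplotlib2tikz/save.py).
from matplotlib2tikz.save import _print_pgfplot_libs_message

_print_pgfplot_libs_message({"pgfplots libs": {"groupplots"}, "tikz libs": {"plotmarks"}})
# Between two '=' rules this prints, after the fixed \usepackage lines:
#   \usetikzlibrary{plotmarks}
#   \usepgfplotslibrary{groupplots}
```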
233,169
nschloe/matplotlib2tikz
matplotlib2tikz/save.py
_ContentManager.extend
def extend(self, content, zorder): """ Extends with a list and a z-order """ if zorder not in self._content: self._content[zorder] = [] self._content[zorder].extend(content)
python
def extend(self, content, zorder): """ Extends with a list and a z-order """ if zorder not in self._content: self._content[zorder] = [] self._content[zorder].extend(content)
[ "def", "extend", "(", "self", ",", "content", ",", "zorder", ")", ":", "if", "zorder", "not", "in", "self", ".", "_content", ":", "self", ".", "_content", "[", "zorder", "]", "=", "[", "]", "self", ".", "_content", "[", "zorder", "]", ".", "extend", "(", "content", ")" ]
Extends with a list and a z-order
[ "Extends", "with", "a", "list", "and", "a", "z", "-", "order" ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L322-L327
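A self-contained re-creation of the z-order bucketing, plus an assumed flattening step that the real class presumably performs elsewhere in save.py (the method name here is invented):

```python
class ContentManager:
    """Minimal stand-in for the private _ContentManager in this record."""

    def __init__(self):
        self._content = {}

    def extend(self, content, zorder):
        # Same logic as the record: one list bucket per z-order value.
        if zorder not in self._content:
            self._content[zorder] = []
        self._content[zorder].extend(content)

    def flatten(self):
        # Assumed counterpart: emit buckets in ascending z-order.
        return [item for z in sorted(self._content) for item in self._content[z]]

cm = ContentManager()
cm.extend(["drawn last"], zorder=2)
cm.extend(["drawn first"], zorder=1)
print(cm.flatten())  # ['drawn first', 'drawn last']
```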
233,170
nschloe/matplotlib2tikz
matplotlib2tikz/line2d.py
draw_line2d
def draw_line2d(data, obj): """Returns the PGFPlots code for an Line2D environment. """ content = [] addplot_options = [] # If line is of length 0, do nothing. Otherwise, an empty \addplot table will be # created, which will be interpreted as an external data source in either the file # '' or '.tex'. Instead, render nothing. if len(obj.get_xdata()) == 0: return data, [] # get the linewidth (in pt) line_width = mypath.mpl_linewidth2pgfp_linewidth(data, obj.get_linewidth()) if line_width: addplot_options.append(line_width) # get line color color = obj.get_color() data, line_xcolor, _ = mycol.mpl_color2xcolor(data, color) addplot_options.append(line_xcolor) alpha = obj.get_alpha() if alpha is not None: addplot_options.append("opacity={}".format(alpha)) linestyle = mypath.mpl_linestyle2pgfplots_linestyle(obj.get_linestyle(), line=obj) if linestyle is not None and linestyle != "solid": addplot_options.append(linestyle) marker_face_color = obj.get_markerfacecolor() marker_edge_color = obj.get_markeredgecolor() data, marker, extra_mark_options = _mpl_marker2pgfp_marker( data, obj.get_marker(), marker_face_color ) if marker: _marker( obj, data, marker, addplot_options, extra_mark_options, marker_face_color, marker_edge_color, line_xcolor, ) if marker and linestyle is None: addplot_options.append("only marks") # Check if a line is in a legend and forget it if not. # Fixes <https://github.com/nschloe/matplotlib2tikz/issues/167>. legend_text = get_legend_text(obj) if legend_text is None and has_legend(obj.axes): addplot_options.append("forget plot") # process options content.append("\\addplot ") if addplot_options: content.append("[{}]\n".format(", ".join(addplot_options))) c, axis_options = _table(obj, data) content += c if legend_text is not None: content.append("\\addlegendentry{{{}}}\n".format(legend_text)) return data, content
python
def draw_line2d(data, obj): """Returns the PGFPlots code for an Line2D environment. """ content = [] addplot_options = [] # If line is of length 0, do nothing. Otherwise, an empty \addplot table will be # created, which will be interpreted as an external data source in either the file # '' or '.tex'. Instead, render nothing. if len(obj.get_xdata()) == 0: return data, [] # get the linewidth (in pt) line_width = mypath.mpl_linewidth2pgfp_linewidth(data, obj.get_linewidth()) if line_width: addplot_options.append(line_width) # get line color color = obj.get_color() data, line_xcolor, _ = mycol.mpl_color2xcolor(data, color) addplot_options.append(line_xcolor) alpha = obj.get_alpha() if alpha is not None: addplot_options.append("opacity={}".format(alpha)) linestyle = mypath.mpl_linestyle2pgfplots_linestyle(obj.get_linestyle(), line=obj) if linestyle is not None and linestyle != "solid": addplot_options.append(linestyle) marker_face_color = obj.get_markerfacecolor() marker_edge_color = obj.get_markeredgecolor() data, marker, extra_mark_options = _mpl_marker2pgfp_marker( data, obj.get_marker(), marker_face_color ) if marker: _marker( obj, data, marker, addplot_options, extra_mark_options, marker_face_color, marker_edge_color, line_xcolor, ) if marker and linestyle is None: addplot_options.append("only marks") # Check if a line is in a legend and forget it if not. # Fixes <https://github.com/nschloe/matplotlib2tikz/issues/167>. legend_text = get_legend_text(obj) if legend_text is None and has_legend(obj.axes): addplot_options.append("forget plot") # process options content.append("\\addplot ") if addplot_options: content.append("[{}]\n".format(", ".join(addplot_options))) c, axis_options = _table(obj, data) content += c if legend_text is not None: content.append("\\addlegendentry{{{}}}\n".format(legend_text)) return data, content
[ "def", "draw_line2d", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "addplot_options", "=", "[", "]", "# If line is of length 0, do nothing. Otherwise, an empty \\addplot table will be", "# created, which will be interpreted as an external data source in either the file", "# '' or '.tex'. Instead, render nothing.", "if", "len", "(", "obj", ".", "get_xdata", "(", ")", ")", "==", "0", ":", "return", "data", ",", "[", "]", "# get the linewidth (in pt)", "line_width", "=", "mypath", ".", "mpl_linewidth2pgfp_linewidth", "(", "data", ",", "obj", ".", "get_linewidth", "(", ")", ")", "if", "line_width", ":", "addplot_options", ".", "append", "(", "line_width", ")", "# get line color", "color", "=", "obj", ".", "get_color", "(", ")", "data", ",", "line_xcolor", ",", "_", "=", "mycol", ".", "mpl_color2xcolor", "(", "data", ",", "color", ")", "addplot_options", ".", "append", "(", "line_xcolor", ")", "alpha", "=", "obj", ".", "get_alpha", "(", ")", "if", "alpha", "is", "not", "None", ":", "addplot_options", ".", "append", "(", "\"opacity={}\"", ".", "format", "(", "alpha", ")", ")", "linestyle", "=", "mypath", ".", "mpl_linestyle2pgfplots_linestyle", "(", "obj", ".", "get_linestyle", "(", ")", ",", "line", "=", "obj", ")", "if", "linestyle", "is", "not", "None", "and", "linestyle", "!=", "\"solid\"", ":", "addplot_options", ".", "append", "(", "linestyle", ")", "marker_face_color", "=", "obj", ".", "get_markerfacecolor", "(", ")", "marker_edge_color", "=", "obj", ".", "get_markeredgecolor", "(", ")", "data", ",", "marker", ",", "extra_mark_options", "=", "_mpl_marker2pgfp_marker", "(", "data", ",", "obj", ".", "get_marker", "(", ")", ",", "marker_face_color", ")", "if", "marker", ":", "_marker", "(", "obj", ",", "data", ",", "marker", ",", "addplot_options", ",", "extra_mark_options", ",", "marker_face_color", ",", "marker_edge_color", ",", "line_xcolor", ",", ")", "if", "marker", "and", "linestyle", "is", "None", ":", "addplot_options", ".", "append", "(", "\"only marks\"", ")", "# Check if a line is in a legend and forget it if not.", "# Fixes <https://github.com/nschloe/matplotlib2tikz/issues/167>.", "legend_text", "=", "get_legend_text", "(", "obj", ")", "if", "legend_text", "is", "None", "and", "has_legend", "(", "obj", ".", "axes", ")", ":", "addplot_options", ".", "append", "(", "\"forget plot\"", ")", "# process options", "content", ".", "append", "(", "\"\\\\addplot \"", ")", "if", "addplot_options", ":", "content", ".", "append", "(", "\"[{}]\\n\"", ".", "format", "(", "\", \"", ".", "join", "(", "addplot_options", ")", ")", ")", "c", ",", "axis_options", "=", "_table", "(", "obj", ",", "data", ")", "content", "+=", "c", "if", "legend_text", "is", "not", "None", ":", "content", ".", "append", "(", "\"\\\\addlegendentry{{{}}}\\n\"", ".", "format", "(", "legend_text", ")", ")", "return", "data", ",", "content" ]
Returns the PGFPlots code for a Line2D environment.
[ "Returns", "the", "PGFPlots", "code", "for", "a", "Line2D", "environment", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L18-L85
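As with the image case, the usual way to exercise this path is through the public `save` entry point rather than calling `draw_line2d` directly, since its `data` dict is assembled internally; a sketch:

```python
import matplotlib.pyplot as plt
import matplotlib2tikz  # save() assumed as the public entry point

plt.plot([1, 2, 3], [2, 4, 8], "o--", color="red", label="growth")
plt.legend()
matplotlib2tikz.save("line.tex")
# line.tex will contain roughly:
#   \addplot [red, dashed, mark=*, ...] table {...};
#   \addlegendentry{growth}
```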
233,171
nschloe/matplotlib2tikz
matplotlib2tikz/line2d.py
draw_linecollection
def draw_linecollection(data, obj): """Returns Pgfplots code for a number of patch objects. """ content = [] edgecolors = obj.get_edgecolors() linestyles = obj.get_linestyles() linewidths = obj.get_linewidths() paths = obj.get_paths() for i, path in enumerate(paths): color = edgecolors[i] if i < len(edgecolors) else edgecolors[0] style = linestyles[i] if i < len(linestyles) else linestyles[0] width = linewidths[i] if i < len(linewidths) else linewidths[0] data, options = mypath.get_draw_options(data, obj, color, None, style, width) # TODO what about masks? data, cont, _, _ = mypath.draw_path( data, path, draw_options=options, simplify=False ) content.append(cont + "\n") return data, content
python
def draw_linecollection(data, obj): """Returns Pgfplots code for a number of patch objects. """ content = [] edgecolors = obj.get_edgecolors() linestyles = obj.get_linestyles() linewidths = obj.get_linewidths() paths = obj.get_paths() for i, path in enumerate(paths): color = edgecolors[i] if i < len(edgecolors) else edgecolors[0] style = linestyles[i] if i < len(linestyles) else linestyles[0] width = linewidths[i] if i < len(linewidths) else linewidths[0] data, options = mypath.get_draw_options(data, obj, color, None, style, width) # TODO what about masks? data, cont, _, _ = mypath.draw_path( data, path, draw_options=options, simplify=False ) content.append(cont + "\n") return data, content
[ "def", "draw_linecollection", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "edgecolors", "=", "obj", ".", "get_edgecolors", "(", ")", "linestyles", "=", "obj", ".", "get_linestyles", "(", ")", "linewidths", "=", "obj", ".", "get_linewidths", "(", ")", "paths", "=", "obj", ".", "get_paths", "(", ")", "for", "i", ",", "path", "in", "enumerate", "(", "paths", ")", ":", "color", "=", "edgecolors", "[", "i", "]", "if", "i", "<", "len", "(", "edgecolors", ")", "else", "edgecolors", "[", "0", "]", "style", "=", "linestyles", "[", "i", "]", "if", "i", "<", "len", "(", "linestyles", ")", "else", "linestyles", "[", "0", "]", "width", "=", "linewidths", "[", "i", "]", "if", "i", "<", "len", "(", "linewidths", ")", "else", "linewidths", "[", "0", "]", "data", ",", "options", "=", "mypath", ".", "get_draw_options", "(", "data", ",", "obj", ",", "color", ",", "None", ",", "style", ",", "width", ")", "# TODO what about masks?", "data", ",", "cont", ",", "_", ",", "_", "=", "mypath", ".", "draw_path", "(", "data", ",", "path", ",", "draw_options", "=", "options", ",", "simplify", "=", "False", ")", "content", ".", "append", "(", "cont", "+", "\"\\n\"", ")", "return", "data", ",", "content" ]
Returns Pgfplots code for a number of path objects.
[ "Returns", "Pgfplots", "code", "for", "a", "number", "of", "path", "objects", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L88-L111
233,172
nschloe/matplotlib2tikz
matplotlib2tikz/line2d.py
_mpl_marker2pgfp_marker
def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color): """Translates a marker style of matplotlib to the corresponding style in PGFPlots. """ # try default list try: pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker] except KeyError: pass else: if (marker_face_color is not None) and pgfplots_marker == "o": pgfplots_marker = "*" data["tikz libs"].add("plotmarks") marker_options = None return (data, pgfplots_marker, marker_options) # try plotmarks list try: data["tikz libs"].add("plotmarks") pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker] except KeyError: # There's no equivalent for the pixel marker (,) in Pgfplots. pass else: if ( marker_face_color is not None and ( not isinstance(marker_face_color, str) or marker_face_color.lower() != "none" ) and pgfplots_marker not in ["|", "-", "asterisk", "star"] ): pgfplots_marker += "*" return (data, pgfplots_marker, marker_options) return data, None, None
python
def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color): """Translates a marker style of matplotlib to the corresponding style in PGFPlots. """ # try default list try: pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker] except KeyError: pass else: if (marker_face_color is not None) and pgfplots_marker == "o": pgfplots_marker = "*" data["tikz libs"].add("plotmarks") marker_options = None return (data, pgfplots_marker, marker_options) # try plotmarks list try: data["tikz libs"].add("plotmarks") pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker] except KeyError: # There's no equivalent for the pixel marker (,) in Pgfplots. pass else: if ( marker_face_color is not None and ( not isinstance(marker_face_color, str) or marker_face_color.lower() != "none" ) and pgfplots_marker not in ["|", "-", "asterisk", "star"] ): pgfplots_marker += "*" return (data, pgfplots_marker, marker_options) return data, None, None
[ "def", "_mpl_marker2pgfp_marker", "(", "data", ",", "mpl_marker", ",", "marker_face_color", ")", ":", "# try default list", "try", ":", "pgfplots_marker", "=", "_MP_MARKER2PGF_MARKER", "[", "mpl_marker", "]", "except", "KeyError", ":", "pass", "else", ":", "if", "(", "marker_face_color", "is", "not", "None", ")", "and", "pgfplots_marker", "==", "\"o\"", ":", "pgfplots_marker", "=", "\"*\"", "data", "[", "\"tikz libs\"", "]", ".", "add", "(", "\"plotmarks\"", ")", "marker_options", "=", "None", "return", "(", "data", ",", "pgfplots_marker", ",", "marker_options", ")", "# try plotmarks list", "try", ":", "data", "[", "\"tikz libs\"", "]", ".", "add", "(", "\"plotmarks\"", ")", "pgfplots_marker", ",", "marker_options", "=", "_MP_MARKER2PLOTMARKS", "[", "mpl_marker", "]", "except", "KeyError", ":", "# There's no equivalent for the pixel marker (,) in Pgfplots.", "pass", "else", ":", "if", "(", "marker_face_color", "is", "not", "None", "and", "(", "not", "isinstance", "(", "marker_face_color", ",", "str", ")", "or", "marker_face_color", ".", "lower", "(", ")", "!=", "\"none\"", ")", "and", "pgfplots_marker", "not", "in", "[", "\"|\"", ",", "\"-\"", ",", "\"asterisk\"", ",", "\"star\"", "]", ")", ":", "pgfplots_marker", "+=", "\"*\"", "return", "(", "data", ",", "pgfplots_marker", ",", "marker_options", ")", "return", "data", ",", "None", ",", "None" ]
Translates a marker style of matplotlib to the corresponding style in PGFPlots.
[ "Translates", "a", "marker", "style", "of", "matplotlib", "to", "the", "corresponding", "style", "in", "PGFPlots", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L147-L182
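The two-stage lookup can be illustrated standalone with tiny stand-in tables; the real `_MP_MARKER2PGF_MARKER` and `_MP_MARKER2PLOTMARKS` live elsewhere in line2d.py, so these entries are plausible examples only:

```python
MP2PGF = {"o": "o", "+": "+"}            # stand-in: core pgf marks
MP2PLOTMARKS = {"D": ("diamond", None)}  # stand-in: marks from the plotmarks library

def translate(mpl_marker, face_color):
    if mpl_marker in MP2PGF:
        marker = MP2PGF[mpl_marker]
        if face_color is not None and marker == "o":
            marker = "*"                 # filled circle variant
        return marker, None
    if mpl_marker in MP2PLOTMARKS:
        marker, options = MP2PLOTMARKS[mpl_marker]
        if face_color not in (None, "none"):
            marker += "*"                # filled plotmarks variant
        return marker, options
    return None, None                    # e.g. the pixel marker ','

print(translate("o", "blue"))  # ('*', None)
print(translate("D", "blue"))  # ('diamond*', None)
print(translate(",", None))    # (None, None)
```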
233,173
nschloe/matplotlib2tikz
matplotlib2tikz/text.py
draw_text
def draw_text(data, obj): """Paints text on the graph. """ content = [] properties = [] style = [] if isinstance(obj, mpl.text.Annotation): _annotation(obj, data, content) # 1: coordinates # 2: properties (shapes, rotation, etc) # 3: text style # 4: the text # -------1--------2---3--4-- pos = obj.get_position() # from .util import transform_to_data_coordinates # pos = transform_to_data_coordinates(obj, *pos) text = obj.get_text() if text in ["", data["current axis title"]]: # Text nodes which are direct children of Axes are typically titles. They are # already captured by the `title` property of pgfplots axes, so skip them here. return data, content size = obj.get_size() bbox = obj.get_bbox_patch() converter = mpl.colors.ColorConverter() # without the factor 0.5, the fonts are too big most of the time. # TODO fix this scaling = 0.5 * size / data["font size"] ff = data["float format"] if scaling != 1.0: properties.append(("scale=" + ff).format(scaling)) if bbox is not None: _bbox(bbox, data, properties, scaling) ha = obj.get_ha() va = obj.get_va() anchor = _transform_positioning(ha, va) if anchor is not None: properties.append(anchor) data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color())) properties.append("text={}".format(col)) properties.append("rotate={:.1f}".format(obj.get_rotation())) if obj.get_style() == "italic": style.append("\\itshape") else: assert obj.get_style() == "normal" # From matplotlib/font_manager.py: # weight_dict = { # 'ultralight' : 100, # 'light' : 200, # 'normal' : 400, # 'regular' : 400, # 'book' : 400, # 'medium' : 500, # 'roman' : 500, # 'semibold' : 600, # 'demibold' : 600, # 'demi' : 600, # 'bold' : 700, # 'heavy' : 800, # 'extra bold' : 800, # 'black' : 900} # # get_weights returns a numeric value in the range 0-1000 or one of # ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’, # ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’ weight = obj.get_weight() if weight in [ "semibold", "demibold", "demi", "bold", "heavy", "extra bold", "black", ] or (isinstance(weight, int) and weight > 550): style.append("\\bfseries") # \lfseries isn't that common yet # elif weight == 'light' or (isinstance(weight, int) and weight < 300): # style.append('\\lfseries') if obj.axes: # If the coordinates are relative to an axis, use `axis cs`. tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos) else: # relative to the entire figure, it's a getting a littler harder. See # <http://tex.stackexchange.com/a/274902/13262> for a solution to the # problem: tikz_pos = ( "({{$(current bounding box.south west)!" + ff + "!" "(current bounding box.south east)$}}" "|-" "{{$(current bounding box.south west)!" + ff + "!" "(current bounding box.north west)$}})" ).format(*pos) if "\n" in text: # http://tex.stackexchange.com/a/124114/13262 properties.append("align={}".format(ha)) # Manipulating the text here is actually against mpl2tikz's policy not # to do that. On the other hand, newlines should translate into # newlines. # We might want to remove this here in the future. text = text.replace("\n ", "\\\\") content.append( "\\node at {}[\n {}\n]{{{}}};\n".format( tikz_pos, ",\n ".join(properties), " ".join(style + [text]) ) ) return data, content
python
def draw_text(data, obj): """Paints text on the graph. """ content = [] properties = [] style = [] if isinstance(obj, mpl.text.Annotation): _annotation(obj, data, content) # 1: coordinates # 2: properties (shapes, rotation, etc) # 3: text style # 4: the text # -------1--------2---3--4-- pos = obj.get_position() # from .util import transform_to_data_coordinates # pos = transform_to_data_coordinates(obj, *pos) text = obj.get_text() if text in ["", data["current axis title"]]: # Text nodes which are direct children of Axes are typically titles. They are # already captured by the `title` property of pgfplots axes, so skip them here. return data, content size = obj.get_size() bbox = obj.get_bbox_patch() converter = mpl.colors.ColorConverter() # without the factor 0.5, the fonts are too big most of the time. # TODO fix this scaling = 0.5 * size / data["font size"] ff = data["float format"] if scaling != 1.0: properties.append(("scale=" + ff).format(scaling)) if bbox is not None: _bbox(bbox, data, properties, scaling) ha = obj.get_ha() va = obj.get_va() anchor = _transform_positioning(ha, va) if anchor is not None: properties.append(anchor) data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color())) properties.append("text={}".format(col)) properties.append("rotate={:.1f}".format(obj.get_rotation())) if obj.get_style() == "italic": style.append("\\itshape") else: assert obj.get_style() == "normal" # From matplotlib/font_manager.py: # weight_dict = { # 'ultralight' : 100, # 'light' : 200, # 'normal' : 400, # 'regular' : 400, # 'book' : 400, # 'medium' : 500, # 'roman' : 500, # 'semibold' : 600, # 'demibold' : 600, # 'demi' : 600, # 'bold' : 700, # 'heavy' : 800, # 'extra bold' : 800, # 'black' : 900} # # get_weights returns a numeric value in the range 0-1000 or one of # ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’, # ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’ weight = obj.get_weight() if weight in [ "semibold", "demibold", "demi", "bold", "heavy", "extra bold", "black", ] or (isinstance(weight, int) and weight > 550): style.append("\\bfseries") # \lfseries isn't that common yet # elif weight == 'light' or (isinstance(weight, int) and weight < 300): # style.append('\\lfseries') if obj.axes: # If the coordinates are relative to an axis, use `axis cs`. tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos) else: # relative to the entire figure, it's a getting a littler harder. See # <http://tex.stackexchange.com/a/274902/13262> for a solution to the # problem: tikz_pos = ( "({{$(current bounding box.south west)!" + ff + "!" "(current bounding box.south east)$}}" "|-" "{{$(current bounding box.south west)!" + ff + "!" "(current bounding box.north west)$}})" ).format(*pos) if "\n" in text: # http://tex.stackexchange.com/a/124114/13262 properties.append("align={}".format(ha)) # Manipulating the text here is actually against mpl2tikz's policy not # to do that. On the other hand, newlines should translate into # newlines. # We might want to remove this here in the future. text = text.replace("\n ", "\\\\") content.append( "\\node at {}[\n {}\n]{{{}}};\n".format( tikz_pos, ",\n ".join(properties), " ".join(style + [text]) ) ) return data, content
[ "def", "draw_text", "(", "data", ",", "obj", ")", ":", "content", "=", "[", "]", "properties", "=", "[", "]", "style", "=", "[", "]", "if", "isinstance", "(", "obj", ",", "mpl", ".", "text", ".", "Annotation", ")", ":", "_annotation", "(", "obj", ",", "data", ",", "content", ")", "# 1: coordinates", "# 2: properties (shapes, rotation, etc)", "# 3: text style", "# 4: the text", "# -------1--------2---3--4--", "pos", "=", "obj", ".", "get_position", "(", ")", "# from .util import transform_to_data_coordinates", "# pos = transform_to_data_coordinates(obj, *pos)", "text", "=", "obj", ".", "get_text", "(", ")", "if", "text", "in", "[", "\"\"", ",", "data", "[", "\"current axis title\"", "]", "]", ":", "# Text nodes which are direct children of Axes are typically titles. They are", "# already captured by the `title` property of pgfplots axes, so skip them here.", "return", "data", ",", "content", "size", "=", "obj", ".", "get_size", "(", ")", "bbox", "=", "obj", ".", "get_bbox_patch", "(", ")", "converter", "=", "mpl", ".", "colors", ".", "ColorConverter", "(", ")", "# without the factor 0.5, the fonts are too big most of the time.", "# TODO fix this", "scaling", "=", "0.5", "*", "size", "/", "data", "[", "\"font size\"", "]", "ff", "=", "data", "[", "\"float format\"", "]", "if", "scaling", "!=", "1.0", ":", "properties", ".", "append", "(", "(", "\"scale=\"", "+", "ff", ")", ".", "format", "(", "scaling", ")", ")", "if", "bbox", "is", "not", "None", ":", "_bbox", "(", "bbox", ",", "data", ",", "properties", ",", "scaling", ")", "ha", "=", "obj", ".", "get_ha", "(", ")", "va", "=", "obj", ".", "get_va", "(", ")", "anchor", "=", "_transform_positioning", "(", "ha", ",", "va", ")", "if", "anchor", "is", "not", "None", ":", "properties", ".", "append", "(", "anchor", ")", "data", ",", "col", ",", "_", "=", "color", ".", "mpl_color2xcolor", "(", "data", ",", "converter", ".", "to_rgb", "(", "obj", ".", "get_color", "(", ")", ")", ")", "properties", ".", "append", "(", "\"text={}\"", ".", "format", "(", "col", ")", ")", "properties", ".", "append", "(", "\"rotate={:.1f}\"", ".", "format", "(", "obj", ".", "get_rotation", "(", ")", ")", ")", "if", "obj", ".", "get_style", "(", ")", "==", "\"italic\"", ":", "style", ".", "append", "(", "\"\\\\itshape\"", ")", "else", ":", "assert", "obj", ".", "get_style", "(", ")", "==", "\"normal\"", "# From matplotlib/font_manager.py:", "# weight_dict = {", "# 'ultralight' : 100,", "# 'light' : 200,", "# 'normal' : 400,", "# 'regular' : 400,", "# 'book' : 400,", "# 'medium' : 500,", "# 'roman' : 500,", "# 'semibold' : 600,", "# 'demibold' : 600,", "# 'demi' : 600,", "# 'bold' : 700,", "# 'heavy' : 800,", "# 'extra bold' : 800,", "# 'black' : 900}", "#", "# get_weights returns a numeric value in the range 0-1000 or one of", "# ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’,", "# ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’", "weight", "=", "obj", ".", "get_weight", "(", ")", "if", "weight", "in", "[", "\"semibold\"", ",", "\"demibold\"", ",", "\"demi\"", ",", "\"bold\"", ",", "\"heavy\"", ",", "\"extra bold\"", ",", "\"black\"", ",", "]", "or", "(", "isinstance", "(", "weight", ",", "int", ")", "and", "weight", ">", "550", ")", ":", "style", ".", "append", "(", "\"\\\\bfseries\"", ")", "# \\lfseries isn't that common yet", "# elif weight == 'light' or (isinstance(weight, int) and weight < 300):", "# style.append('\\\\lfseries')", "if", "obj", ".", "axes", ":", "# If the coordinates are relative to an axis, use `axis cs`.", 
"tikz_pos", "=", "(", "\"(axis cs:\"", "+", "ff", "+", "\",\"", "+", "ff", "+", "\")\"", ")", ".", "format", "(", "*", "pos", ")", "else", ":", "# relative to the entire figure, it's a getting a littler harder. See", "# <http://tex.stackexchange.com/a/274902/13262> for a solution to the", "# problem:", "tikz_pos", "=", "(", "\"({{$(current bounding box.south west)!\"", "+", "ff", "+", "\"!\"", "\"(current bounding box.south east)$}}\"", "\"|-\"", "\"{{$(current bounding box.south west)!\"", "+", "ff", "+", "\"!\"", "\"(current bounding box.north west)$}})\"", ")", ".", "format", "(", "*", "pos", ")", "if", "\"\\n\"", "in", "text", ":", "# http://tex.stackexchange.com/a/124114/13262", "properties", ".", "append", "(", "\"align={}\"", ".", "format", "(", "ha", ")", ")", "# Manipulating the text here is actually against mpl2tikz's policy not", "# to do that. On the other hand, newlines should translate into", "# newlines.", "# We might want to remove this here in the future.", "text", "=", "text", ".", "replace", "(", "\"\\n \"", ",", "\"\\\\\\\\\"", ")", "content", ".", "append", "(", "\"\\\\node at {}[\\n {}\\n]{{{}}};\\n\"", ".", "format", "(", "tikz_pos", ",", "\",\\n \"", ".", "join", "(", "properties", ")", ",", "\" \"", ".", "join", "(", "style", "+", "[", "text", "]", ")", ")", ")", "return", "data", ",", "content" ]
Paints text on the graph.
[ "Paints", "text", "on", "the", "graph", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/text.py#L8-L126
233,174
nschloe/matplotlib2tikz
matplotlib2tikz/text.py
_transform_positioning
def _transform_positioning(ha, va): """Converts matplotlib positioning to pgf node positioning. Not quite accurate but the results are equivalent more or less.""" if ha == "center" and va == "center": return None ha_mpl_to_tikz = {"right": "east", "left": "west", "center": ""} va_mpl_to_tikz = { "top": "north", "bottom": "south", "center": "", "baseline": "base", } return "anchor={} {}".format(va_mpl_to_tikz[va], ha_mpl_to_tikz[ha]).strip()
python
def _transform_positioning(ha, va): """Converts matplotlib positioning to pgf node positioning. Not quite accurate but the results are equivalent more or less.""" if ha == "center" and va == "center": return None ha_mpl_to_tikz = {"right": "east", "left": "west", "center": ""} va_mpl_to_tikz = { "top": "north", "bottom": "south", "center": "", "baseline": "base", } return "anchor={} {}".format(va_mpl_to_tikz[va], ha_mpl_to_tikz[ha]).strip()
[ "def", "_transform_positioning", "(", "ha", ",", "va", ")", ":", "if", "ha", "==", "\"center\"", "and", "va", "==", "\"center\"", ":", "return", "None", "ha_mpl_to_tikz", "=", "{", "\"right\"", ":", "\"east\"", ",", "\"left\"", ":", "\"west\"", ",", "\"center\"", ":", "\"\"", "}", "va_mpl_to_tikz", "=", "{", "\"top\"", ":", "\"north\"", ",", "\"bottom\"", ":", "\"south\"", ",", "\"center\"", ":", "\"\"", ",", "\"baseline\"", ":", "\"base\"", ",", "}", "return", "\"anchor={} {}\"", ".", "format", "(", "va_mpl_to_tikz", "[", "va", "]", ",", "ha_mpl_to_tikz", "[", "ha", "]", ")", ".", "strip", "(", ")" ]
Converts matplotlib positioning to pgf node positioning. Not quite accurate but the results are more or less equivalent.
[ "Converts", "matplotlib", "positioning", "to", "pgf", "node", "positioning", ".", "Not", "quite", "accurate", "but", "the", "results", "are", "more", "or", "less", "equivalent", "." ]
ac5daca6f38b834d757f6c6ae6cc34121956f46b
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/text.py#L129-L142
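Expected behavior of the anchor mapping, assuming the private helper can be imported from the module location in this record:

```python
from matplotlib2tikz.text import _transform_positioning  # path assumed

print(_transform_positioning("left", "top"))         # anchor=north west
print(_transform_positioning("center", "baseline"))  # anchor=base
print(_transform_positioning("center", "center"))    # None -> keep default anchor
```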
233,175
turicas/rows
rows/plugins/plugin_json.py
import_from_json
def import_from_json(filename_or_fobj, encoding="utf-8", *args, **kwargs): """Import a JSON file or file-like object into a `rows.Table`. If a file-like object is provided it MUST be open in text (non-binary) mode on Python 3 and could be open in both binary or text mode on Python 2. """ source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="json", encoding=encoding) json_obj = json.load(source.fobj, encoding=source.encoding) field_names = list(json_obj[0].keys()) table_rows = [[item[key] for key in field_names] for item in json_obj] meta = {"imported_from": "json", "source": source} return create_table([field_names] + table_rows, meta=meta, *args, **kwargs)
python
def import_from_json(filename_or_fobj, encoding="utf-8", *args, **kwargs): """Import a JSON file or file-like object into a `rows.Table`. If a file-like object is provided it MUST be open in text (non-binary) mode on Python 3 and could be open in both binary or text mode on Python 2. """ source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="json", encoding=encoding) json_obj = json.load(source.fobj, encoding=source.encoding) field_names = list(json_obj[0].keys()) table_rows = [[item[key] for key in field_names] for item in json_obj] meta = {"imported_from": "json", "source": source} return create_table([field_names] + table_rows, meta=meta, *args, **kwargs)
[ "def", "import_from_json", "(", "filename_or_fobj", ",", "encoding", "=", "\"utf-8\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "mode", "=", "\"rb\"", ",", "plugin_name", "=", "\"json\"", ",", "encoding", "=", "encoding", ")", "json_obj", "=", "json", ".", "load", "(", "source", ".", "fobj", ",", "encoding", "=", "source", ".", "encoding", ")", "field_names", "=", "list", "(", "json_obj", "[", "0", "]", ".", "keys", "(", ")", ")", "table_rows", "=", "[", "[", "item", "[", "key", "]", "for", "key", "in", "field_names", "]", "for", "item", "in", "json_obj", "]", "meta", "=", "{", "\"imported_from\"", ":", "\"json\"", ",", "\"source\"", ":", "source", "}", "return", "create_table", "(", "[", "field_names", "]", "+", "table_rows", ",", "meta", "=", "meta", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Import a JSON file or file-like object into a `rows.Table`. If a file-like object is provided it MUST be open in text (non-binary) mode on Python 3 and may be open in either binary or text mode on Python 2.
[ "Import", "a", "JSON", "file", "or", "file", "-", "like", "object", "into", "a", "rows", ".", "Table", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_json.py#L33-L47
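A minimal round-trip sketch of the importer using the public rows API; field names come from the first object's keys and field types are detected on import:

```python
import rows

# Write a small JSON file, then import it.
with open("people.json", "w", encoding="utf-8") as fobj:
    fobj.write('[{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}]')

table = rows.import_from_json("people.json")
for row in table:
    print(row.name, row.age)  # Alice 30 / Bob 25
```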
233,176
turicas/rows
rows/plugins/plugin_json.py
export_to_json
def export_to_json( table, filename_or_fobj=None, encoding="utf-8", indent=None, *args, **kwargs ): """Export a `rows.Table` to a JSON file or file-like object. If a file-like object is provided it MUST be open in binary mode (like in `open('myfile.json', mode='wb')`). """ # TODO: will work only if table.fields is OrderedDict fields = table.fields prepared_table = prepare_to_export(table, *args, **kwargs) field_names = next(prepared_table) data = [ { field_name: _convert(value, fields[field_name], *args, **kwargs) for field_name, value in zip(field_names, row) } for row in prepared_table ] result = json.dumps(data, indent=indent) if type(result) is six.text_type: # Python 3 result = result.encode(encoding) if indent is not None: # clean up empty spaces at the end of lines result = b"\n".join(line.rstrip() for line in result.splitlines()) return export_data(filename_or_fobj, result, mode="wb")
python
def export_to_json( table, filename_or_fobj=None, encoding="utf-8", indent=None, *args, **kwargs ): """Export a `rows.Table` to a JSON file or file-like object. If a file-like object is provided it MUST be open in binary mode (like in `open('myfile.json', mode='wb')`). """ # TODO: will work only if table.fields is OrderedDict fields = table.fields prepared_table = prepare_to_export(table, *args, **kwargs) field_names = next(prepared_table) data = [ { field_name: _convert(value, fields[field_name], *args, **kwargs) for field_name, value in zip(field_names, row) } for row in prepared_table ] result = json.dumps(data, indent=indent) if type(result) is six.text_type: # Python 3 result = result.encode(encoding) if indent is not None: # clean up empty spaces at the end of lines result = b"\n".join(line.rstrip() for line in result.splitlines()) return export_data(filename_or_fobj, result, mode="wb")
[ "def", "export_to_json", "(", "table", ",", "filename_or_fobj", "=", "None", ",", "encoding", "=", "\"utf-8\"", ",", "indent", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: will work only if table.fields is OrderedDict", "fields", "=", "table", ".", "fields", "prepared_table", "=", "prepare_to_export", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", "field_names", "=", "next", "(", "prepared_table", ")", "data", "=", "[", "{", "field_name", ":", "_convert", "(", "value", ",", "fields", "[", "field_name", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "field_name", ",", "value", "in", "zip", "(", "field_names", ",", "row", ")", "}", "for", "row", "in", "prepared_table", "]", "result", "=", "json", ".", "dumps", "(", "data", ",", "indent", "=", "indent", ")", "if", "type", "(", "result", ")", "is", "six", ".", "text_type", ":", "# Python 3", "result", "=", "result", ".", "encode", "(", "encoding", ")", "if", "indent", "is", "not", "None", ":", "# clean up empty spaces at the end of lines", "result", "=", "b\"\\n\"", ".", "join", "(", "line", ".", "rstrip", "(", ")", "for", "line", "in", "result", ".", "splitlines", "(", ")", ")", "return", "export_data", "(", "filename_or_fobj", ",", "result", ",", "mode", "=", "\"wb\"", ")" ]
Export a `rows.Table` to a JSON file or file-like object. If a file-like object is provided it MUST be open in binary mode (like in `open('myfile.json', mode='wb')`).
[ "Export", "a", "rows", ".", "Table", "to", "a", "JSON", "file", "or", "file", "-", "like", "object", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_json.py#L68-L97
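A usage sketch for the exporter; passing no filename is assumed, based on the final `export_data` call, to hand back the encoded bytes directly:

```python
import rows

table = rows.import_from_dicts([{"name": "Alice", "age": 30}])

# filename_or_fobj omitted: export_data() is assumed to return the bytes.
result = rows.export_to_json(table, indent=2)
print(result.decode("utf-8"))  # pretty-printed JSON list, one object per row
```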
233,177
turicas/rows
rows/utils.py
plugin_name_by_uri
def plugin_name_by_uri(uri): "Return the plugin name based on the URI" # TODO: parse URIs like 'sqlite://' also parsed = urlparse(uri) basename = os.path.basename(parsed.path) if not basename.strip(): raise RuntimeError("Could not identify file format.") plugin_name = basename.split(".")[-1].lower() if plugin_name in FILE_EXTENSIONS: plugin_name = MIME_TYPE_TO_PLUGIN_NAME[FILE_EXTENSIONS[plugin_name]] return plugin_name
python
def plugin_name_by_uri(uri): "Return the plugin name based on the URI" # TODO: parse URIs like 'sqlite://' also parsed = urlparse(uri) basename = os.path.basename(parsed.path) if not basename.strip(): raise RuntimeError("Could not identify file format.") plugin_name = basename.split(".")[-1].lower() if plugin_name in FILE_EXTENSIONS: plugin_name = MIME_TYPE_TO_PLUGIN_NAME[FILE_EXTENSIONS[plugin_name]] return plugin_name
[ "def", "plugin_name_by_uri", "(", "uri", ")", ":", "# TODO: parse URIs like 'sqlite://' also", "parsed", "=", "urlparse", "(", "uri", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "parsed", ".", "path", ")", "if", "not", "basename", ".", "strip", "(", ")", ":", "raise", "RuntimeError", "(", "\"Could not identify file format.\"", ")", "plugin_name", "=", "basename", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", ".", "lower", "(", ")", "if", "plugin_name", "in", "FILE_EXTENSIONS", ":", "plugin_name", "=", "MIME_TYPE_TO_PLUGIN_NAME", "[", "FILE_EXTENSIONS", "[", "plugin_name", "]", "]", "return", "plugin_name" ]
Return the plugin name based on the URI
[ "Return", "the", "plugin", "name", "based", "on", "the", "URI" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L249-L263
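Expected behavior of the URI-based detection; the exact `FILE_EXTENSIONS` and `MIME_TYPE_TO_PLUGIN_NAME` tables are defined elsewhere in utils.py, so the mapped outcomes shown are assumptions:

```python
from rows.utils import plugin_name_by_uri

print(plugin_name_by_uri("https://example.com/data/report.CSV"))  # 'csv'
print(plugin_name_by_uri("/tmp/export.json"))                     # 'json'

# A URI whose path has no basename raises RuntimeError:
# plugin_name_by_uri("https://example.com/")
```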
233,178
turicas/rows
rows/utils.py
extension_by_source
def extension_by_source(source, mime_type): "Return the file extension used by this plugin" # TODO: should get this information from the plugin extension = source.plugin_name if extension: return extension if mime_type: return mime_type.split("/")[-1]
python
def extension_by_source(source, mime_type): "Return the file extension used by this plugin" # TODO: should get this information from the plugin extension = source.plugin_name if extension: return extension if mime_type: return mime_type.split("/")[-1]
[ "def", "extension_by_source", "(", "source", ",", "mime_type", ")", ":", "# TODO: should get this information from the plugin", "extension", "=", "source", ".", "plugin_name", "if", "extension", ":", "return", "extension", "if", "mime_type", ":", "return", "mime_type", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]" ]
Return the file extension used by this plugin
[ "Return", "the", "file", "extension", "used", "by", "this", "plugin" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L266-L275
233,179
turicas/rows
rows/utils.py
plugin_name_by_mime_type
def plugin_name_by_mime_type(mime_type, mime_name, file_extension): "Return the plugin name based on the MIME type" return MIME_TYPE_TO_PLUGIN_NAME.get( normalize_mime_type(mime_type, mime_name, file_extension), None )
python
def plugin_name_by_mime_type(mime_type, mime_name, file_extension): "Return the plugin name based on the MIME type" return MIME_TYPE_TO_PLUGIN_NAME.get( normalize_mime_type(mime_type, mime_name, file_extension), None )
[ "def", "plugin_name_by_mime_type", "(", "mime_type", ",", "mime_name", ",", "file_extension", ")", ":", "return", "MIME_TYPE_TO_PLUGIN_NAME", ".", "get", "(", "normalize_mime_type", "(", "mime_type", ",", "mime_name", ",", "file_extension", ")", ",", "None", ")" ]
Return the plugin name based on the MIME type
[ "Return", "the", "plugin", "name", "based", "on", "the", "MIME", "type" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L297-L302
233,180
turicas/rows
rows/utils.py
detect_source
def detect_source(uri, verify_ssl, progress, timeout=5): """Return a `rows.Source` with information for a given URI If URI starts with "http" or "https" the file will be downloaded. This function should only be used if the URI already exists because it's going to download/open the file to detect its encoding and MIME type. """ # TODO: should also supporte other schemes, like file://, sqlite:// etc. if uri.lower().startswith("http://") or uri.lower().startswith("https://"): return download_file( uri, verify_ssl=verify_ssl, timeout=timeout, progress=progress, detect=True ) elif uri.startswith("postgres://"): return Source( should_delete=False, encoding=None, plugin_name="postgresql", uri=uri, is_file=False, local=None, ) else: return local_file(uri)
python
def detect_source(uri, verify_ssl, progress, timeout=5): """Return a `rows.Source` with information for a given URI If URI starts with "http" or "https" the file will be downloaded. This function should only be used if the URI already exists because it's going to download/open the file to detect its encoding and MIME type. """ # TODO: should also supporte other schemes, like file://, sqlite:// etc. if uri.lower().startswith("http://") or uri.lower().startswith("https://"): return download_file( uri, verify_ssl=verify_ssl, timeout=timeout, progress=progress, detect=True ) elif uri.startswith("postgres://"): return Source( should_delete=False, encoding=None, plugin_name="postgresql", uri=uri, is_file=False, local=None, ) else: return local_file(uri)
[ "def", "detect_source", "(", "uri", ",", "verify_ssl", ",", "progress", ",", "timeout", "=", "5", ")", ":", "# TODO: should also supporte other schemes, like file://, sqlite:// etc.", "if", "uri", ".", "lower", "(", ")", ".", "startswith", "(", "\"http://\"", ")", "or", "uri", ".", "lower", "(", ")", ".", "startswith", "(", "\"https://\"", ")", ":", "return", "download_file", "(", "uri", ",", "verify_ssl", "=", "verify_ssl", ",", "timeout", "=", "timeout", ",", "progress", "=", "progress", ",", "detect", "=", "True", ")", "elif", "uri", ".", "startswith", "(", "\"postgres://\"", ")", ":", "return", "Source", "(", "should_delete", "=", "False", ",", "encoding", "=", "None", ",", "plugin_name", "=", "\"postgresql\"", ",", "uri", "=", "uri", ",", "is_file", "=", "False", ",", "local", "=", "None", ",", ")", "else", ":", "return", "local_file", "(", "uri", ")" ]
Return a `rows.Source` with information for a given URI. If the URI starts with "http" or "https" the file will be downloaded. This function should only be used if the URI already exists because it's going to download/open the file to detect its encoding and MIME type.
[ "Return", "a", "rows", ".", "Source", "with", "information", "for", "a", "given", "URI" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L439-L465
233,181
turicas/rows
rows/utils.py
import_from_source
def import_from_source(source, default_encoding, *args, **kwargs): "Import data described in a `rows.Source` into a `rows.Table`" # TODO: test open_compressed plugin_name = source.plugin_name kwargs["encoding"] = ( kwargs.get("encoding", None) or source.encoding or default_encoding ) try: import_function = getattr(rows, "import_from_{}".format(plugin_name)) except AttributeError: raise ValueError('Plugin (import) "{}" not found'.format(plugin_name)) table = import_function(source.uri, *args, **kwargs) return table
python
def import_from_source(source, default_encoding, *args, **kwargs): "Import data described in a `rows.Source` into a `rows.Table`" # TODO: test open_compressed plugin_name = source.plugin_name kwargs["encoding"] = ( kwargs.get("encoding", None) or source.encoding or default_encoding ) try: import_function = getattr(rows, "import_from_{}".format(plugin_name)) except AttributeError: raise ValueError('Plugin (import) "{}" not found'.format(plugin_name)) table = import_function(source.uri, *args, **kwargs) return table
[ "def", "import_from_source", "(", "source", ",", "default_encoding", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: test open_compressed", "plugin_name", "=", "source", ".", "plugin_name", "kwargs", "[", "\"encoding\"", "]", "=", "(", "kwargs", ".", "get", "(", "\"encoding\"", ",", "None", ")", "or", "source", ".", "encoding", "or", "default_encoding", ")", "try", ":", "import_function", "=", "getattr", "(", "rows", ",", "\"import_from_{}\"", ".", "format", "(", "plugin_name", ")", ")", "except", "AttributeError", ":", "raise", "ValueError", "(", "'Plugin (import) \"{}\" not found'", ".", "format", "(", "plugin_name", ")", ")", "table", "=", "import_function", "(", "source", ".", "uri", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "table" ]
Import data described in a `rows.Source` into a `rows.Table`
[ "Import", "data", "described", "in", "a", "rows", ".", "Source", "into", "a", "rows", ".", "Table" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L468-L484
233,182
turicas/rows
rows/utils.py
import_from_uri
def import_from_uri( uri, default_encoding="utf-8", verify_ssl=True, progress=False, *args, **kwargs ): "Given an URI, detects plugin and encoding and imports into a `rows.Table`" # TODO: support '-' also # TODO: (optimization) if `kwargs.get('encoding', None) is not None` we can # skip encoding detection. source = detect_source(uri, verify_ssl=verify_ssl, progress=progress) return import_from_source(source, default_encoding, *args, **kwargs)
python
def import_from_uri( uri, default_encoding="utf-8", verify_ssl=True, progress=False, *args, **kwargs ): "Given an URI, detects plugin and encoding and imports into a `rows.Table`" # TODO: support '-' also # TODO: (optimization) if `kwargs.get('encoding', None) is not None` we can # skip encoding detection. source = detect_source(uri, verify_ssl=verify_ssl, progress=progress) return import_from_source(source, default_encoding, *args, **kwargs)
[ "def", "import_from_uri", "(", "uri", ",", "default_encoding", "=", "\"utf-8\"", ",", "verify_ssl", "=", "True", ",", "progress", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: support '-' also", "# TODO: (optimization) if `kwargs.get('encoding', None) is not None` we can", "# skip encoding detection.", "source", "=", "detect_source", "(", "uri", ",", "verify_ssl", "=", "verify_ssl", ",", "progress", "=", "progress", ")", "return", "import_from_source", "(", "source", ",", "default_encoding", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Given a URI, detects plugin and encoding and imports into a `rows.Table`
[ "Given", "a", "URI", "detects", "plugin", "and", "encoding", "and", "imports", "into", "a", "rows", ".", "Table" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L487-L496
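One-line usage of the convenience wrapper; the URL is illustrative only and must point at an existing CSV for the call to succeed:

```python
import rows.utils

table = rows.utils.import_from_uri("https://example.com/data.csv")
print(len(table), "rows imported")
```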
233,183
turicas/rows
rows/utils.py
open_compressed
def open_compressed(filename, mode="r", encoding=None): "Return a text-based file object from a filename, even if compressed" # TODO: integrate this function in the library itself, using # get_filename_and_fobj binary_mode = "b" in mode extension = str(filename).split(".")[-1].lower() if binary_mode and encoding: raise ValueError("encoding should not be specified in binary mode") if extension == "xz": if lzma is None: raise RuntimeError("lzma support is not installed") fobj = lzma.open(filename, mode=mode) if binary_mode: return fobj else: return io.TextIOWrapper(fobj, encoding=encoding) elif extension == "gz": fobj = gzip.GzipFile(filename, mode=mode) if binary_mode: return fobj else: return io.TextIOWrapper(fobj, encoding=encoding) elif extension == "bz2": if bz2 is None: raise RuntimeError("bzip2 support is not installed") if binary_mode: # ignore encoding return bz2.open(filename, mode=mode) else: if "t" not in mode: # For some reason, passing only mode='r' to bzip2 is equivalent # to 'rb', not 'rt', so we force it here. mode += "t" return bz2.open(filename, mode=mode, encoding=encoding) else: if binary_mode: return open(filename, mode=mode) else: return open(filename, mode=mode, encoding=encoding)
python
def open_compressed(filename, mode="r", encoding=None): "Return a text-based file object from a filename, even if compressed" # TODO: integrate this function in the library itself, using # get_filename_and_fobj binary_mode = "b" in mode extension = str(filename).split(".")[-1].lower() if binary_mode and encoding: raise ValueError("encoding should not be specified in binary mode") if extension == "xz": if lzma is None: raise RuntimeError("lzma support is not installed") fobj = lzma.open(filename, mode=mode) if binary_mode: return fobj else: return io.TextIOWrapper(fobj, encoding=encoding) elif extension == "gz": fobj = gzip.GzipFile(filename, mode=mode) if binary_mode: return fobj else: return io.TextIOWrapper(fobj, encoding=encoding) elif extension == "bz2": if bz2 is None: raise RuntimeError("bzip2 support is not installed") if binary_mode: # ignore encoding return bz2.open(filename, mode=mode) else: if "t" not in mode: # For some reason, passing only mode='r' to bzip2 is equivalent # to 'rb', not 'rt', so we force it here. mode += "t" return bz2.open(filename, mode=mode, encoding=encoding) else: if binary_mode: return open(filename, mode=mode) else: return open(filename, mode=mode, encoding=encoding)
[ "def", "open_compressed", "(", "filename", ",", "mode", "=", "\"r\"", ",", "encoding", "=", "None", ")", ":", "# TODO: integrate this function in the library itself, using", "# get_filename_and_fobj", "binary_mode", "=", "\"b\"", "in", "mode", "extension", "=", "str", "(", "filename", ")", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", ".", "lower", "(", ")", "if", "binary_mode", "and", "encoding", ":", "raise", "ValueError", "(", "\"encoding should not be specified in binary mode\"", ")", "if", "extension", "==", "\"xz\"", ":", "if", "lzma", "is", "None", ":", "raise", "RuntimeError", "(", "\"lzma support is not installed\"", ")", "fobj", "=", "lzma", ".", "open", "(", "filename", ",", "mode", "=", "mode", ")", "if", "binary_mode", ":", "return", "fobj", "else", ":", "return", "io", ".", "TextIOWrapper", "(", "fobj", ",", "encoding", "=", "encoding", ")", "elif", "extension", "==", "\"gz\"", ":", "fobj", "=", "gzip", ".", "GzipFile", "(", "filename", ",", "mode", "=", "mode", ")", "if", "binary_mode", ":", "return", "fobj", "else", ":", "return", "io", ".", "TextIOWrapper", "(", "fobj", ",", "encoding", "=", "encoding", ")", "elif", "extension", "==", "\"bz2\"", ":", "if", "bz2", "is", "None", ":", "raise", "RuntimeError", "(", "\"bzip2 support is not installed\"", ")", "if", "binary_mode", ":", "# ignore encoding", "return", "bz2", ".", "open", "(", "filename", ",", "mode", "=", "mode", ")", "else", ":", "if", "\"t\"", "not", "in", "mode", ":", "# For some reason, passing only mode='r' to bzip2 is equivalent", "# to 'rb', not 'rt', so we force it here.", "mode", "+=", "\"t\"", "return", "bz2", ".", "open", "(", "filename", ",", "mode", "=", "mode", ",", "encoding", "=", "encoding", ")", "else", ":", "if", "binary_mode", ":", "return", "open", "(", "filename", ",", "mode", "=", "mode", ")", "else", ":", "return", "open", "(", "filename", ",", "mode", "=", "mode", ",", "encoding", "=", "encoding", ")" ]
Return a text-based file object from a filename, even if compressed
[ "Return", "a", "text", "-", "based", "file", "object", "from", "a", "filename", "even", "if", "compressed" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L513-L557
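Because the extension alone selects the codec, the same call shape covers plain and compressed files; a sketch:

```python
from rows.utils import open_compressed

with open("plain.txt", "w", encoding="utf-8") as f:
    f.write("hello\n")

# Text mode (default): returns str regardless of compression.
with open_compressed("plain.txt", encoding="utf-8") as fobj:
    print(fobj.read())

# For a gzip file the call is identical:
#   with open_compressed("big.csv.gz", encoding="utf-8") as fobj: ...
# and mode="rb" yields the raw binary stream instead.
```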
233,184
turicas/rows
rows/utils.py
csv_to_sqlite
def csv_to_sqlite( input_filename, output_filename, samples=None, dialect=None, batch_size=10000, encoding="utf-8", callback=None, force_types=None, chunk_size=8388608, table_name="table1", schema=None, ): "Export a CSV file to SQLite, based on field type detection from samples" # TODO: automatically detect encoding if encoding == `None` # TODO: should be able to specify fields # TODO: if table_name is "2019" the final name will be "field_2019" - must # be "table_2019" # TODO: if schema is provided and the names are in uppercase, this function # will fail if dialect is None: # Get a sample to detect dialect fobj = open_compressed(input_filename, mode="rb") sample = fobj.read(chunk_size) dialect = rows.plugins.csv.discover_dialect(sample, encoding=encoding) elif isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) if schema is None: # Identify data types fobj = open_compressed(input_filename, encoding=encoding) data = list(islice(csv.DictReader(fobj, dialect=dialect), samples)) schema = rows.import_from_dicts(data).fields if force_types is not None: schema.update(force_types) # Create lazy table object to be converted # TODO: this laziness feature will be incorporated into the library soon so # we can call here `rows.import_from_csv` instead of `csv.reader`. reader = csv.reader( open_compressed(input_filename, encoding=encoding), dialect=dialect ) header = make_header(next(reader)) # skip header table = rows.Table(fields=OrderedDict([(field, schema[field]) for field in header])) table._rows = reader # Export to SQLite return rows.export_to_sqlite( table, output_filename, table_name=table_name, batch_size=batch_size, callback=callback, )
python
def csv_to_sqlite( input_filename, output_filename, samples=None, dialect=None, batch_size=10000, encoding="utf-8", callback=None, force_types=None, chunk_size=8388608, table_name="table1", schema=None, ): "Export a CSV file to SQLite, based on field type detection from samples" # TODO: automatically detect encoding if encoding == `None` # TODO: should be able to specify fields # TODO: if table_name is "2019" the final name will be "field_2019" - must # be "table_2019" # TODO: if schema is provided and the names are in uppercase, this function # will fail if dialect is None: # Get a sample to detect dialect fobj = open_compressed(input_filename, mode="rb") sample = fobj.read(chunk_size) dialect = rows.plugins.csv.discover_dialect(sample, encoding=encoding) elif isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) if schema is None: # Identify data types fobj = open_compressed(input_filename, encoding=encoding) data = list(islice(csv.DictReader(fobj, dialect=dialect), samples)) schema = rows.import_from_dicts(data).fields if force_types is not None: schema.update(force_types) # Create lazy table object to be converted # TODO: this laziness feature will be incorporated into the library soon so # we can call here `rows.import_from_csv` instead of `csv.reader`. reader = csv.reader( open_compressed(input_filename, encoding=encoding), dialect=dialect ) header = make_header(next(reader)) # skip header table = rows.Table(fields=OrderedDict([(field, schema[field]) for field in header])) table._rows = reader # Export to SQLite return rows.export_to_sqlite( table, output_filename, table_name=table_name, batch_size=batch_size, callback=callback, )
[ "def", "csv_to_sqlite", "(", "input_filename", ",", "output_filename", ",", "samples", "=", "None", ",", "dialect", "=", "None", ",", "batch_size", "=", "10000", ",", "encoding", "=", "\"utf-8\"", ",", "callback", "=", "None", ",", "force_types", "=", "None", ",", "chunk_size", "=", "8388608", ",", "table_name", "=", "\"table1\"", ",", "schema", "=", "None", ",", ")", ":", "# TODO: automatically detect encoding if encoding == `None`", "# TODO: should be able to specify fields", "# TODO: if table_name is \"2019\" the final name will be \"field_2019\" - must", "# be \"table_2019\"", "# TODO: if schema is provided and the names are in uppercase, this function", "# will fail", "if", "dialect", "is", "None", ":", "# Get a sample to detect dialect", "fobj", "=", "open_compressed", "(", "input_filename", ",", "mode", "=", "\"rb\"", ")", "sample", "=", "fobj", ".", "read", "(", "chunk_size", ")", "dialect", "=", "rows", ".", "plugins", ".", "csv", ".", "discover_dialect", "(", "sample", ",", "encoding", "=", "encoding", ")", "elif", "isinstance", "(", "dialect", ",", "six", ".", "text_type", ")", ":", "dialect", "=", "csv", ".", "get_dialect", "(", "dialect", ")", "if", "schema", "is", "None", ":", "# Identify data types", "fobj", "=", "open_compressed", "(", "input_filename", ",", "encoding", "=", "encoding", ")", "data", "=", "list", "(", "islice", "(", "csv", ".", "DictReader", "(", "fobj", ",", "dialect", "=", "dialect", ")", ",", "samples", ")", ")", "schema", "=", "rows", ".", "import_from_dicts", "(", "data", ")", ".", "fields", "if", "force_types", "is", "not", "None", ":", "schema", ".", "update", "(", "force_types", ")", "# Create lazy table object to be converted", "# TODO: this laziness feature will be incorporated into the library soon so", "# we can call here `rows.import_from_csv` instead of `csv.reader`.", "reader", "=", "csv", ".", "reader", "(", "open_compressed", "(", "input_filename", ",", "encoding", "=", "encoding", ")", ",", "dialect", "=", "dialect", ")", "header", "=", "make_header", "(", "next", "(", "reader", ")", ")", "# skip header", "table", "=", "rows", ".", "Table", "(", "fields", "=", "OrderedDict", "(", "[", "(", "field", ",", "schema", "[", "field", "]", ")", "for", "field", "in", "header", "]", ")", ")", "table", ".", "_rows", "=", "reader", "# Export to SQLite", "return", "rows", ".", "export_to_sqlite", "(", "table", ",", "output_filename", ",", "table_name", "=", "table_name", ",", "batch_size", "=", "batch_size", ",", "callback", "=", "callback", ",", ")" ]
Export a CSV file to SQLite, based on field type detection from samples
[ "Export", "a", "CSV", "file", "to", "SQLite", "based", "on", "field", "type", "detection", "from", "samples" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L560-L613
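A hedged sketch of driving `csv_to_sqlite` above; the filenames are hypothetical, and the callback signature mirrors the `(written, total)` pattern used elsewhere in this module, which is an assumption here:

from rows.utils import csv_to_sqlite  # assumed import path

def report(written, total):
    # Invoked periodically during the SQLite export (assumed signature)
    print("{} rows exported so far".format(total))

# Field types are detected from the first 5000 rows; the rest stream lazily.
csv_to_sqlite("cities.csv.gz", "cities.sqlite", samples=5000,
              table_name="cities", callback=report)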
233,185
turicas/rows
rows/utils.py
sqlite_to_csv
def sqlite_to_csv( input_filename, table_name, output_filename, dialect=csv.excel, batch_size=10000, encoding="utf-8", callback=None, query=None, ): """Export a table inside a SQLite database to CSV""" # TODO: should be able to specify fields # TODO: should be able to specify custom query if isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) if query is None: query = "SELECT * FROM {}".format(table_name) connection = sqlite3.Connection(input_filename) cursor = connection.cursor() result = cursor.execute(query) header = [item[0] for item in cursor.description] fobj = open_compressed(output_filename, mode="w", encoding=encoding) writer = csv.writer(fobj, dialect=dialect) writer.writerow(header) total_written = 0 for batch in rows.plugins.utils.ipartition(result, batch_size): writer.writerows(batch) written = len(batch) total_written += written if callback: callback(written, total_written) fobj.close()
python
def sqlite_to_csv( input_filename, table_name, output_filename, dialect=csv.excel, batch_size=10000, encoding="utf-8", callback=None, query=None, ): """Export a table inside a SQLite database to CSV""" # TODO: should be able to specify fields # TODO: should be able to specify custom query if isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) if query is None: query = "SELECT * FROM {}".format(table_name) connection = sqlite3.Connection(input_filename) cursor = connection.cursor() result = cursor.execute(query) header = [item[0] for item in cursor.description] fobj = open_compressed(output_filename, mode="w", encoding=encoding) writer = csv.writer(fobj, dialect=dialect) writer.writerow(header) total_written = 0 for batch in rows.plugins.utils.ipartition(result, batch_size): writer.writerows(batch) written = len(batch) total_written += written if callback: callback(written, total_written) fobj.close()
[ "def", "sqlite_to_csv", "(", "input_filename", ",", "table_name", ",", "output_filename", ",", "dialect", "=", "csv", ".", "excel", ",", "batch_size", "=", "10000", ",", "encoding", "=", "\"utf-8\"", ",", "callback", "=", "None", ",", "query", "=", "None", ",", ")", ":", "# TODO: should be able to specify fields", "# TODO: should be able to specify custom query", "if", "isinstance", "(", "dialect", ",", "six", ".", "text_type", ")", ":", "dialect", "=", "csv", ".", "get_dialect", "(", "dialect", ")", "if", "query", "is", "None", ":", "query", "=", "\"SELECT * FROM {}\"", ".", "format", "(", "table_name", ")", "connection", "=", "sqlite3", ".", "Connection", "(", "input_filename", ")", "cursor", "=", "connection", ".", "cursor", "(", ")", "result", "=", "cursor", ".", "execute", "(", "query", ")", "header", "=", "[", "item", "[", "0", "]", "for", "item", "in", "cursor", ".", "description", "]", "fobj", "=", "open_compressed", "(", "output_filename", ",", "mode", "=", "\"w\"", ",", "encoding", "=", "encoding", ")", "writer", "=", "csv", ".", "writer", "(", "fobj", ",", "dialect", "=", "dialect", ")", "writer", ".", "writerow", "(", "header", ")", "total_written", "=", "0", "for", "batch", "in", "rows", ".", "plugins", ".", "utils", ".", "ipartition", "(", "result", ",", "batch_size", ")", ":", "writer", ".", "writerows", "(", "batch", ")", "written", "=", "len", "(", "batch", ")", "total_written", "+=", "written", "if", "callback", ":", "callback", "(", "written", ",", "total_written", ")", "fobj", ".", "close", "(", ")" ]
Export a table inside a SQLite database to CSV
[ "Export", "a", "table", "inside", "a", "SQLite", "database", "to", "CSV" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L616-L650
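A usage sketch for `sqlite_to_csv`, assuming the same import path; because the output passes through `open_compressed`, a `.gz` suffix yields a compressed CSV:

from rows.utils import sqlite_to_csv  # assumed import path

# Export one table; pass `query` to dump an arbitrary SELECT instead.
sqlite_to_csv("cities.sqlite", "cities", "cities.csv.gz", batch_size=5000)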
233,186
turicas/rows
rows/utils.py
execute_command
def execute_command(command): """Execute a command and return its output""" command = shlex.split(command) try: process = subprocess.Popen( command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) except FileNotFoundError: raise RuntimeError("Command not found: {}".format(repr(command))) process.wait() # TODO: may use another codec to decode if process.returncode > 0: stderr = process.stderr.read().decode("utf-8") raise ValueError("Error executing command: {}".format(repr(stderr))) return process.stdout.read().decode("utf-8")
python
def execute_command(command): """Execute a command and return its output""" command = shlex.split(command) try: process = subprocess.Popen( command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) except FileNotFoundError: raise RuntimeError("Command not found: {}".format(repr(command))) process.wait() # TODO: may use another codec to decode if process.returncode > 0: stderr = process.stderr.read().decode("utf-8") raise ValueError("Error executing command: {}".format(repr(stderr))) return process.stdout.read().decode("utf-8")
[ "def", "execute_command", "(", "command", ")", ":", "command", "=", "shlex", ".", "split", "(", "command", ")", "try", ":", "process", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", ")", "except", "FileNotFoundError", ":", "raise", "RuntimeError", "(", "\"Command not found: {}\"", ".", "format", "(", "repr", "(", "command", ")", ")", ")", "process", ".", "wait", "(", ")", "# TODO: may use another codec to decode", "if", "process", ".", "returncode", ">", "0", ":", "stderr", "=", "process", ".", "stderr", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", "raise", "ValueError", "(", "\"Error executing command: {}\"", ".", "format", "(", "repr", "(", "stderr", ")", ")", ")", "return", "process", ".", "stdout", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")" ]
Execute a command and return its output
[ "Execute", "a", "command", "and", "return", "its", "output" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L706-L724
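A small sketch of `execute_command`; the command itself is illustrative and Unix-specific:

from rows.utils import execute_command  # assumed import path

# Raises RuntimeError if the binary is missing, ValueError on a non-zero exit.
kernel_release = execute_command("uname -r").strip()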
233,187
turicas/rows
rows/utils.py
uncompressed_size
def uncompressed_size(filename): """Return the uncompressed size for a file by executing commands Note: due to a limitation in gzip format, uncompressed files greater than 4GiB will have a wrong value. """ quoted_filename = shlex.quote(filename) # TODO: get filetype from file-magic, if available if str(filename).lower().endswith(".xz"): output = execute_command('xz --list "{}"'.format(quoted_filename)) compressed, uncompressed = regexp_sizes.findall(output) value, unit = uncompressed.split() value = float(value.replace(",", "")) return int(value * MULTIPLIERS[unit]) elif str(filename).lower().endswith(".gz"): # XXX: gzip only uses 32 bits to store uncompressed size, so if the # uncompressed size is greater than 4GiB, the value returned will be # incorrect. output = execute_command('gzip --list "{}"'.format(quoted_filename)) lines = [line.split() for line in output.splitlines()] header, data = lines[0], lines[1] gzip_data = dict(zip(header, data)) return int(gzip_data["uncompressed"]) else: raise ValueError('Unrecognized file type for "{}".'.format(filename))
python
def uncompressed_size(filename): """Return the uncompressed size for a file by executing commands Note: due to a limitation in gzip format, uncompressed files greater than 4GiB will have a wrong value. """ quoted_filename = shlex.quote(filename) # TODO: get filetype from file-magic, if available if str(filename).lower().endswith(".xz"): output = execute_command('xz --list "{}"'.format(quoted_filename)) compressed, uncompressed = regexp_sizes.findall(output) value, unit = uncompressed.split() value = float(value.replace(",", "")) return int(value * MULTIPLIERS[unit]) elif str(filename).lower().endswith(".gz"): # XXX: gzip only uses 32 bits to store uncompressed size, so if the # uncompressed size is greater than 4GiB, the value returned will be # incorrect. output = execute_command('gzip --list "{}"'.format(quoted_filename)) lines = [line.split() for line in output.splitlines()] header, data = lines[0], lines[1] gzip_data = dict(zip(header, data)) return int(gzip_data["uncompressed"]) else: raise ValueError('Unrecognized file type for "{}".'.format(filename))
[ "def", "uncompressed_size", "(", "filename", ")", ":", "quoted_filename", "=", "shlex", ".", "quote", "(", "filename", ")", "# TODO: get filetype from file-magic, if available", "if", "str", "(", "filename", ")", ".", "lower", "(", ")", ".", "endswith", "(", "\".xz\"", ")", ":", "output", "=", "execute_command", "(", "'xz --list \"{}\"'", ".", "format", "(", "quoted_filename", ")", ")", "compressed", ",", "uncompressed", "=", "regexp_sizes", ".", "findall", "(", "output", ")", "value", ",", "unit", "=", "uncompressed", ".", "split", "(", ")", "value", "=", "float", "(", "value", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "return", "int", "(", "value", "*", "MULTIPLIERS", "[", "unit", "]", ")", "elif", "str", "(", "filename", ")", ".", "lower", "(", ")", ".", "endswith", "(", "\".gz\"", ")", ":", "# XXX: gzip only uses 32 bits to store uncompressed size, so if the", "# uncompressed size is greater than 4GiB, the value returned will be", "# incorrect.", "output", "=", "execute_command", "(", "'gzip --list \"{}\"'", ".", "format", "(", "quoted_filename", ")", ")", "lines", "=", "[", "line", ".", "split", "(", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", "]", "header", ",", "data", "=", "lines", "[", "0", "]", ",", "lines", "[", "1", "]", "gzip_data", "=", "dict", "(", "zip", "(", "header", ",", "data", ")", ")", "return", "int", "(", "gzip_data", "[", "\"uncompressed\"", "]", ")", "else", ":", "raise", "ValueError", "(", "'Unrecognized file type for \"{}\".'", ".", "format", "(", "filename", ")", ")" ]
Return the uncompressed size for a file by executing commands Note: due to a limitation in gzip format, uncompressed files greater than 4GiB will have a wrong value.
[ "Return", "the", "uncompressed", "size", "for", "a", "file", "by", "executing", "commands" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L727-L755
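Usage sketch for `uncompressed_size`, assuming the `xz` and `gzip` binaries are on PATH (the function shells out to `xz --list` / `gzip --list`):

from rows.utils import uncompressed_size  # assumed import path

size_in_bytes = uncompressed_size("data.csv.xz")  # raises ValueError for other extensions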
233,188
turicas/rows
rows/utils.py
pgimport
def pgimport( filename, database_uri, table_name, encoding="utf-8", dialect=None, create_table=True, schema=None, callback=None, timeout=0.1, chunk_size=8388608, max_samples=10000, ): """Import data from CSV into PostgreSQL using the fastest method Required: psql command """ fobj = open_compressed(filename, mode="r", encoding=encoding) sample = fobj.read(chunk_size) if dialect is None: # Detect dialect dialect = rows.plugins.csv.discover_dialect( sample.encode(encoding), encoding=encoding ) elif isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) if schema is None: # Detect field names reader = csv.reader(io.StringIO(sample), dialect=dialect) field_names = [slug(field_name) for field_name in next(reader)] else: field_names = list(schema.keys()) if create_table: if schema is None: data = [ dict(zip(field_names, row)) for row in itertools.islice(reader, max_samples) ] table = rows.import_from_dicts(data) field_types = [table.fields[field_name] for field_name in field_names] else: field_types = list(schema.values()) columns = [ "{} {}".format(name, POSTGRESQL_TYPES.get(type_, DEFAULT_POSTGRESQL_TYPE)) for name, type_ in zip(field_names, field_types) ] create_table = SQL_CREATE_TABLE.format( table_name=table_name, field_types=", ".join(columns) ) execute_command(get_psql_command(create_table, database_uri=database_uri)) # Prepare the `psql` command to be executed based on collected metadata command = get_psql_copy_command( database_uri=database_uri, dialect=dialect, direction="FROM", encoding=encoding, header=field_names, table_name=table_name, ) rows_imported, error = 0, None fobj = open_compressed(filename, mode="rb") try: process = subprocess.Popen( shlex.split(command), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) data = fobj.read(chunk_size) total_written = 0 while data != b"": written = process.stdin.write(data) total_written += written if callback: callback(written, total_written) data = fobj.read(chunk_size) stdout, stderr = process.communicate() if stderr != b"": raise RuntimeError(stderr.decode("utf-8")) rows_imported = int(stdout.replace(b"COPY ", b"").strip()) except FileNotFoundError: raise RuntimeError("Command `psql` not found") except BrokenPipeError: raise RuntimeError(process.stderr.read().decode("utf-8")) return {"bytes_written": total_written, "rows_imported": rows_imported}
python
def pgimport( filename, database_uri, table_name, encoding="utf-8", dialect=None, create_table=True, schema=None, callback=None, timeout=0.1, chunk_size=8388608, max_samples=10000, ): """Import data from CSV into PostgreSQL using the fastest method Required: psql command """ fobj = open_compressed(filename, mode="r", encoding=encoding) sample = fobj.read(chunk_size) if dialect is None: # Detect dialect dialect = rows.plugins.csv.discover_dialect( sample.encode(encoding), encoding=encoding ) elif isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) if schema is None: # Detect field names reader = csv.reader(io.StringIO(sample), dialect=dialect) field_names = [slug(field_name) for field_name in next(reader)] else: field_names = list(schema.keys()) if create_table: if schema is None: data = [ dict(zip(field_names, row)) for row in itertools.islice(reader, max_samples) ] table = rows.import_from_dicts(data) field_types = [table.fields[field_name] for field_name in field_names] else: field_types = list(schema.values()) columns = [ "{} {}".format(name, POSTGRESQL_TYPES.get(type_, DEFAULT_POSTGRESQL_TYPE)) for name, type_ in zip(field_names, field_types) ] create_table = SQL_CREATE_TABLE.format( table_name=table_name, field_types=", ".join(columns) ) execute_command(get_psql_command(create_table, database_uri=database_uri)) # Prepare the `psql` command to be executed based on collected metadata command = get_psql_copy_command( database_uri=database_uri, dialect=dialect, direction="FROM", encoding=encoding, header=field_names, table_name=table_name, ) rows_imported, error = 0, None fobj = open_compressed(filename, mode="rb") try: process = subprocess.Popen( shlex.split(command), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) data = fobj.read(chunk_size) total_written = 0 while data != b"": written = process.stdin.write(data) total_written += written if callback: callback(written, total_written) data = fobj.read(chunk_size) stdout, stderr = process.communicate() if stderr != b"": raise RuntimeError(stderr.decode("utf-8")) rows_imported = int(stdout.replace(b"COPY ", b"").strip()) except FileNotFoundError: raise RuntimeError("Command `psql` not found") except BrokenPipeError: raise RuntimeError(process.stderr.read().decode("utf-8")) return {"bytes_written": total_written, "rows_imported": rows_imported}
[ "def", "pgimport", "(", "filename", ",", "database_uri", ",", "table_name", ",", "encoding", "=", "\"utf-8\"", ",", "dialect", "=", "None", ",", "create_table", "=", "True", ",", "schema", "=", "None", ",", "callback", "=", "None", ",", "timeout", "=", "0.1", ",", "chunk_size", "=", "8388608", ",", "max_samples", "=", "10000", ",", ")", ":", "fobj", "=", "open_compressed", "(", "filename", ",", "mode", "=", "\"r\"", ",", "encoding", "=", "encoding", ")", "sample", "=", "fobj", ".", "read", "(", "chunk_size", ")", "if", "dialect", "is", "None", ":", "# Detect dialect", "dialect", "=", "rows", ".", "plugins", ".", "csv", ".", "discover_dialect", "(", "sample", ".", "encode", "(", "encoding", ")", ",", "encoding", "=", "encoding", ")", "elif", "isinstance", "(", "dialect", ",", "six", ".", "text_type", ")", ":", "dialect", "=", "csv", ".", "get_dialect", "(", "dialect", ")", "if", "schema", "is", "None", ":", "# Detect field names", "reader", "=", "csv", ".", "reader", "(", "io", ".", "StringIO", "(", "sample", ")", ",", "dialect", "=", "dialect", ")", "field_names", "=", "[", "slug", "(", "field_name", ")", "for", "field_name", "in", "next", "(", "reader", ")", "]", "else", ":", "field_names", "=", "list", "(", "schema", ".", "keys", "(", ")", ")", "if", "create_table", ":", "if", "schema", "is", "None", ":", "data", "=", "[", "dict", "(", "zip", "(", "field_names", ",", "row", ")", ")", "for", "row", "in", "itertools", ".", "islice", "(", "reader", ",", "max_samples", ")", "]", "table", "=", "rows", ".", "import_from_dicts", "(", "data", ")", "field_types", "=", "[", "table", ".", "fields", "[", "field_name", "]", "for", "field_name", "in", "field_names", "]", "else", ":", "field_types", "=", "list", "(", "schema", ".", "values", "(", ")", ")", "columns", "=", "[", "\"{} {}\"", ".", "format", "(", "name", ",", "POSTGRESQL_TYPES", ".", "get", "(", "type_", ",", "DEFAULT_POSTGRESQL_TYPE", ")", ")", "for", "name", ",", "type_", "in", "zip", "(", "field_names", ",", "field_types", ")", "]", "create_table", "=", "SQL_CREATE_TABLE", ".", "format", "(", "table_name", "=", "table_name", ",", "field_types", "=", "\", \"", ".", "join", "(", "columns", ")", ")", "execute_command", "(", "get_psql_command", "(", "create_table", ",", "database_uri", "=", "database_uri", ")", ")", "# Prepare the `psql` command to be executed based on collected metadata", "command", "=", "get_psql_copy_command", "(", "database_uri", "=", "database_uri", ",", "dialect", "=", "dialect", ",", "direction", "=", "\"FROM\"", ",", "encoding", "=", "encoding", ",", "header", "=", "field_names", ",", "table_name", "=", "table_name", ",", ")", "rows_imported", ",", "error", "=", "0", ",", "None", "fobj", "=", "open_compressed", "(", "filename", ",", "mode", "=", "\"rb\"", ")", "try", ":", "process", "=", "subprocess", ".", "Popen", "(", "shlex", ".", "split", "(", "command", ")", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", ")", "data", "=", "fobj", ".", "read", "(", "chunk_size", ")", "total_written", "=", "0", "while", "data", "!=", "b\"\"", ":", "written", "=", "process", ".", "stdin", ".", "write", "(", "data", ")", "total_written", "+=", "written", "if", "callback", ":", "callback", "(", "written", ",", "total_written", ")", "data", "=", "fobj", ".", "read", "(", "chunk_size", ")", "stdout", ",", "stderr", "=", "process", ".", "communicate", "(", ")", "if", "stderr", "!=", "b\"\"", ":", "raise", "RuntimeError", "(", "stderr", 
".", "decode", "(", "\"utf-8\"", ")", ")", "rows_imported", "=", "int", "(", "stdout", ".", "replace", "(", "b\"COPY \"", ",", "b\"\"", ")", ".", "strip", "(", ")", ")", "except", "FileNotFoundError", ":", "raise", "RuntimeError", "(", "\"Command `psql` not found\"", ")", "except", "BrokenPipeError", ":", "raise", "RuntimeError", "(", "process", ".", "stderr", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", "return", "{", "\"bytes_written\"", ":", "total_written", ",", "\"rows_imported\"", ":", "rows_imported", "}" ]
Import data from CSV into PostgreSQL using the fastest method Required: psql command
[ "Import", "data", "from", "CSV", "into", "PostgreSQL", "using", "the", "fastest", "method" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L831-L924
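A hedged call sketch for `pgimport`; the database URI and filename are hypothetical, and `psql` must be installed since the function wraps a COPY FROM pipe:

from rows.utils import pgimport  # assumed import path

result = pgimport(
    "cities.csv.gz",
    "postgres://user:password@localhost/mydb",  # hypothetical credentials
    "cities",
    create_table=True,  # infers column types from up to 10000 sample rows
)
print(result["rows_imported"], result["bytes_written"])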
233,189
turicas/rows
rows/utils.py
pgexport
def pgexport( database_uri, table_name, filename, encoding="utf-8", dialect=csv.excel, callback=None, timeout=0.1, chunk_size=8388608, ): """Export data from PostgreSQL into a CSV file using the fastest method Required: psql command """ if isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) # Prepare the `psql` command to be executed to export data command = get_psql_copy_command( database_uri=database_uri, direction="TO", encoding=encoding, header=None, # Needed when direction = 'TO' table_name=table_name, dialect=dialect, ) fobj = open_compressed(filename, mode="wb") try: process = subprocess.Popen( shlex.split(command), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) total_written = 0 data = process.stdout.read(chunk_size) while data != b"": written = fobj.write(data) total_written += written if callback: callback(written, total_written) data = process.stdout.read(chunk_size) stdout, stderr = process.communicate() if stderr != b"": raise RuntimeError(stderr.decode("utf-8")) except FileNotFoundError: raise RuntimeError("Command `psql` not found") except BrokenPipeError: raise RuntimeError(process.stderr.read().decode("utf-8")) return {"bytes_written": total_written}
python
def pgexport( database_uri, table_name, filename, encoding="utf-8", dialect=csv.excel, callback=None, timeout=0.1, chunk_size=8388608, ): """Export data from PostgreSQL into a CSV file using the fastest method Required: psql command """ if isinstance(dialect, six.text_type): dialect = csv.get_dialect(dialect) # Prepare the `psql` command to be executed to export data command = get_psql_copy_command( database_uri=database_uri, direction="TO", encoding=encoding, header=None, # Needed when direction = 'TO' table_name=table_name, dialect=dialect, ) fobj = open_compressed(filename, mode="wb") try: process = subprocess.Popen( shlex.split(command), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) total_written = 0 data = process.stdout.read(chunk_size) while data != b"": written = fobj.write(data) total_written += written if callback: callback(written, total_written) data = process.stdout.read(chunk_size) stdout, stderr = process.communicate() if stderr != b"": raise RuntimeError(stderr.decode("utf-8")) except FileNotFoundError: raise RuntimeError("Command `psql` not found") except BrokenPipeError: raise RuntimeError(process.stderr.read().decode("utf-8")) return {"bytes_written": total_written}
[ "def", "pgexport", "(", "database_uri", ",", "table_name", ",", "filename", ",", "encoding", "=", "\"utf-8\"", ",", "dialect", "=", "csv", ".", "excel", ",", "callback", "=", "None", ",", "timeout", "=", "0.1", ",", "chunk_size", "=", "8388608", ",", ")", ":", "if", "isinstance", "(", "dialect", ",", "six", ".", "text_type", ")", ":", "dialect", "=", "csv", ".", "get_dialect", "(", "dialect", ")", "# Prepare the `psql` command to be executed to export data", "command", "=", "get_psql_copy_command", "(", "database_uri", "=", "database_uri", ",", "direction", "=", "\"TO\"", ",", "encoding", "=", "encoding", ",", "header", "=", "None", ",", "# Needed when direction = 'TO'", "table_name", "=", "table_name", ",", "dialect", "=", "dialect", ",", ")", "fobj", "=", "open_compressed", "(", "filename", ",", "mode", "=", "\"wb\"", ")", "try", ":", "process", "=", "subprocess", ".", "Popen", "(", "shlex", ".", "split", "(", "command", ")", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", ")", "total_written", "=", "0", "data", "=", "process", ".", "stdout", ".", "read", "(", "chunk_size", ")", "while", "data", "!=", "b\"\"", ":", "written", "=", "fobj", ".", "write", "(", "data", ")", "total_written", "+=", "written", "if", "callback", ":", "callback", "(", "written", ",", "total_written", ")", "data", "=", "process", ".", "stdout", ".", "read", "(", "chunk_size", ")", "stdout", ",", "stderr", "=", "process", ".", "communicate", "(", ")", "if", "stderr", "!=", "b\"\"", ":", "raise", "RuntimeError", "(", "stderr", ".", "decode", "(", "\"utf-8\"", ")", ")", "except", "FileNotFoundError", ":", "raise", "RuntimeError", "(", "\"Command `psql` not found\"", ")", "except", "BrokenPipeError", ":", "raise", "RuntimeError", "(", "process", ".", "stderr", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", "return", "{", "\"bytes_written\"", ":", "total_written", "}" ]
Export data from PostgreSQL into a CSV file using the fastest method Required: psql command
[ "Export", "data", "from", "PostgreSQL", "into", "a", "CSV", "file", "using", "the", "fastest", "method" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L927-L980
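The mirror-image sketch for `pgexport`, with the same hypothetical URI; output compression again follows the filename extension:

from rows.utils import pgexport  # assumed import path

result = pgexport(
    "postgres://user:password@localhost/mydb",  # hypothetical credentials
    "cities",
    "cities-dump.csv.xz",  # compressed transparently by open_compressed
)
print(result["bytes_written"])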
233,190
turicas/rows
rows/utils.py
load_schema
def load_schema(filename, context=None): """Load schema from file in any of the supported formats The table must have at least the fields `field_name` and `field_type`. `context` is a `dict` with field_type as key pointing to field class, like: {"text": rows.fields.TextField, "value": MyCustomField} """ table = import_from_uri(filename) field_names = table.field_names assert "field_name" in field_names assert "field_type" in field_names context = context or { key.replace("Field", "").lower(): getattr(rows.fields, key) for key in dir(rows.fields) if "Field" in key and key != "Field" } return OrderedDict( [ (row.field_name, context[row.field_type]) for row in table ] )
python
def load_schema(filename, context=None): """Load schema from file in any of the supported formats The table must have at least the fields `field_name` and `field_type`. `context` is a `dict` with field_type as key pointing to field class, like: {"text": rows.fields.TextField, "value": MyCustomField} """ table = import_from_uri(filename) field_names = table.field_names assert "field_name" in field_names assert "field_type" in field_names context = context or { key.replace("Field", "").lower(): getattr(rows.fields, key) for key in dir(rows.fields) if "Field" in key and key != "Field" } return OrderedDict( [ (row.field_name, context[row.field_type]) for row in table ] )
[ "def", "load_schema", "(", "filename", ",", "context", "=", "None", ")", ":", "table", "=", "import_from_uri", "(", "filename", ")", "field_names", "=", "table", ".", "field_names", "assert", "\"field_name\"", "in", "field_names", "assert", "\"field_type\"", "in", "field_names", "context", "=", "context", "or", "{", "key", ".", "replace", "(", "\"Field\"", ",", "\"\"", ")", ".", "lower", "(", ")", ":", "getattr", "(", "rows", ".", "fields", ",", "key", ")", "for", "key", "in", "dir", "(", "rows", ".", "fields", ")", "if", "\"Field\"", "in", "key", "and", "key", "!=", "\"Field\"", "}", "return", "OrderedDict", "(", "[", "(", "row", ".", "field_name", ",", "context", "[", "row", ".", "field_type", "]", ")", "for", "row", "in", "table", "]", ")" ]
Load schema from file in any of the supported formats The table must have at least the fields `field_name` and `field_type`. `context` is a `dict` with field_type as key pointing to field class, like: {"text": rows.fields.TextField, "value": MyCustomField}
[ "Load", "schema", "from", "file", "in", "any", "of", "the", "supported", "formats" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L1082-L1104
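A sketch of `load_schema` with a hypothetical schema file; per the docstring the table needs `field_name` and `field_type` columns, and the default context maps lowercase type names to `rows.fields` classes:

# schema.csv (hypothetical contents):
#   field_name,field_type
#   name,text
#   population,integer
from rows.utils import load_schema  # assumed import path

schema = load_schema("schema.csv")
# -> OrderedDict([('name', rows.fields.TextField), ('population', rows.fields.IntegerField)])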
233,191
turicas/rows
rows/fields.py
slug
def slug(text, separator="_", permitted_chars=SLUG_CHARS): """Generate a slug for the `text`. >>> slug(' ÁLVARO justen% ') 'alvaro_justen' >>> slug(' ÁLVARO justen% ', separator='-') 'alvaro-justen' """ text = six.text_type(text or "") # Strip non-ASCII characters # Example: u' ÁLVARO justen% ' -> ' ALVARO justen% ' text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii") # Replace word boundaries with separator text = REGEXP_WORD_BOUNDARY.sub("\\1" + re.escape(separator), text) # Remove non-permitted characters and put everything to lowercase # Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_' allowed_chars = list(permitted_chars) + [separator] text = "".join(char for char in text if char in allowed_chars).lower() # Remove double occurrences of separator # Example: u'_alvaro__justen_' -> u'_alvaro_justen_' text = ( REGEXP_SEPARATOR if separator == "_" else re.compile("(" + re.escape(separator) + "+)") ).sub(separator, text) # Strip separators # Example: u'_alvaro_justen_' -> u'alvaro_justen' return text.strip(separator)
python
def slug(text, separator="_", permitted_chars=SLUG_CHARS): """Generate a slug for the `text`. >>> slug(' ÁLVARO justen% ') 'alvaro_justen' >>> slug(' ÁLVARO justen% ', separator='-') 'alvaro-justen' """ text = six.text_type(text or "") # Strip non-ASCII characters # Example: u' ÁLVARO justen% ' -> ' ALVARO justen% ' text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii") # Replace word boundaries with separator text = REGEXP_WORD_BOUNDARY.sub("\\1" + re.escape(separator), text) # Remove non-permitted characters and put everything to lowercase # Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_' allowed_chars = list(permitted_chars) + [separator] text = "".join(char for char in text if char in allowed_chars).lower() # Remove double occurrences of separator # Example: u'_alvaro__justen_' -> u'_alvaro_justen_' text = ( REGEXP_SEPARATOR if separator == "_" else re.compile("(" + re.escape(separator) + "+)") ).sub(separator, text) # Strip separators # Example: u'_alvaro_justen_' -> u'alvaro_justen' return text.strip(separator)
[ "def", "slug", "(", "text", ",", "separator", "=", "\"_\"", ",", "permitted_chars", "=", "SLUG_CHARS", ")", ":", "text", "=", "six", ".", "text_type", "(", "text", "or", "\"\"", ")", "# Strip non-ASCII characters", "# Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '", "text", "=", "normalize", "(", "\"NFKD\"", ",", "text", ".", "strip", "(", ")", ")", ".", "encode", "(", "\"ascii\"", ",", "\"ignore\"", ")", ".", "decode", "(", "\"ascii\"", ")", "# Replace word boundaries with separator", "text", "=", "REGEXP_WORD_BOUNDARY", ".", "sub", "(", "\"\\\\1\"", "+", "re", ".", "escape", "(", "separator", ")", ",", "text", ")", "# Remove non-permitted characters and put everything to lowercase", "# Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'", "allowed_chars", "=", "list", "(", "permitted_chars", ")", "+", "[", "separator", "]", "text", "=", "\"\"", ".", "join", "(", "char", "for", "char", "in", "text", "if", "char", "in", "allowed_chars", ")", ".", "lower", "(", ")", "# Remove double occurrences of separator", "# Example: u'_alvaro__justen_' -> u'_alvaro_justen_'", "text", "=", "(", "REGEXP_SEPARATOR", "if", "separator", "==", "\"_\"", "else", "re", ".", "compile", "(", "\"(\"", "+", "re", ".", "escape", "(", "separator", ")", "+", "\"+)\"", ")", ")", ".", "sub", "(", "separator", ",", "text", ")", "# Strip separators", "# Example: u'_alvaro_justen_' -> u'alvaro_justen'", "return", "text", ".", "strip", "(", "separator", ")" ]
Generate a slug for the `text`. >>> slug(' ÁLVARO justen% ') 'alvaro_justen' >>> slug(' ÁLVARO justen% ', separator='-') 'alvaro-justen'
[ "Generate", "a", "slug", "for", "the", "text", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L520-L553
233,192
turicas/rows
rows/fields.py
make_unique_name
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2): """Return a unique name based on `name_format` and `name`.""" index = start new_name = name while new_name in existing_names: new_name = name_format.format(name=name, index=index) index += 1 return new_name
python
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2): """Return a unique name based on `name_format` and `name`.""" index = start new_name = name while new_name in existing_names: new_name = name_format.format(name=name, index=index) index += 1 return new_name
[ "def", "make_unique_name", "(", "name", ",", "existing_names", ",", "name_format", "=", "\"{name}_{index}\"", ",", "start", "=", "2", ")", ":", "index", "=", "start", "new_name", "=", "name", "while", "new_name", "in", "existing_names", ":", "new_name", "=", "name_format", ".", "format", "(", "name", "=", "name", ",", "index", "=", "index", ")", "index", "+=", "1", "return", "new_name" ]
Return a unique name based on `name_format` and `name`.
[ "Return", "a", "unique", "name", "based", "on", "name_format", "and", "name", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L556-L564
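The function above is a pure helper, so its behavior can be traced directly from the code; a quick illustration:

from rows.fields import make_unique_name  # assumed import path

make_unique_name("id", existing_names=["id", "id_2"])    # -> 'id_3'
make_unique_name("city", existing_names=["id", "id_2"])  # -> 'city' (already unique)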
233,193
turicas/rows
rows/fields.py
make_header
def make_header(field_names, permit_not=False): """Return unique and slugged field names.""" slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^" header = [ slug(field_name, permitted_chars=slug_chars) for field_name in field_names ] result = [] for index, field_name in enumerate(header): if not field_name: field_name = "field_{}".format(index) elif field_name[0].isdigit(): field_name = "field_{}".format(field_name) if field_name in result: field_name = make_unique_name( name=field_name, existing_names=result, start=2 ) result.append(field_name) return result
python
def make_header(field_names, permit_not=False): """Return unique and slugged field names.""" slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^" header = [ slug(field_name, permitted_chars=slug_chars) for field_name in field_names ] result = [] for index, field_name in enumerate(header): if not field_name: field_name = "field_{}".format(index) elif field_name[0].isdigit(): field_name = "field_{}".format(field_name) if field_name in result: field_name = make_unique_name( name=field_name, existing_names=result, start=2 ) result.append(field_name) return result
[ "def", "make_header", "(", "field_names", ",", "permit_not", "=", "False", ")", ":", "slug_chars", "=", "SLUG_CHARS", "if", "not", "permit_not", "else", "SLUG_CHARS", "+", "\"^\"", "header", "=", "[", "slug", "(", "field_name", ",", "permitted_chars", "=", "slug_chars", ")", "for", "field_name", "in", "field_names", "]", "result", "=", "[", "]", "for", "index", ",", "field_name", "in", "enumerate", "(", "header", ")", ":", "if", "not", "field_name", ":", "field_name", "=", "\"field_{}\"", ".", "format", "(", "index", ")", "elif", "field_name", "[", "0", "]", ".", "isdigit", "(", ")", ":", "field_name", "=", "\"field_{}\"", ".", "format", "(", "field_name", ")", "if", "field_name", "in", "result", ":", "field_name", "=", "make_unique_name", "(", "name", "=", "field_name", ",", "existing_names", "=", "result", ",", "start", "=", "2", ")", "result", ".", "append", "(", "field_name", ")", "return", "result" ]
Return unique and slugged field names.
[ "Return", "unique", "and", "slugged", "field", "names", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L567-L587
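Tracing `make_header` through its three rules (slugging, digit/empty prefixing, deduplication) gives:

from rows.fields import make_header  # assumed import path

make_header(["Name", "2019 Total", "", "Name"])
# -> ['name', 'field_2019_total', 'field_2', 'name_2']
# empty -> field_<index>, leading digit -> field_<slug>, duplicate -> suffixed via make_unique_name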
233,194
turicas/rows
rows/fields.py
Field.deserialize
def deserialize(cls, value, *args, **kwargs): """Deserialize a value just after importing it `cls.deserialize` should always return a value of type `cls.TYPE` or `None`. """ if isinstance(value, cls.TYPE): return value elif is_null(value): return None else: return value
python
def deserialize(cls, value, *args, **kwargs): """Deserialize a value just after importing it `cls.deserialize` should always return a value of type `cls.TYPE` or `None`. """ if isinstance(value, cls.TYPE): return value elif is_null(value): return None else: return value
[ "def", "deserialize", "(", "cls", ",", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "value", ",", "cls", ".", "TYPE", ")", ":", "return", "value", "elif", "is_null", "(", "value", ")", ":", "return", "None", "else", ":", "return", "value" ]
Deserialize a value just after importing it `cls.deserialize` should always return a value of type `cls.TYPE` or `None`.
[ "Deserialize", "a", "value", "just", "after", "importing", "it" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L91-L103
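A sketch of the contract that `Field.deserialize` establishes for subclasses; the concrete parsing behavior of `IntegerField` is assumed here, not shown above:

import rows.fields

rows.fields.IntegerField.deserialize("42")  # -> 42 (the subclass adds string parsing)
rows.fields.IntegerField.deserialize("")    # -> None (null-ish inputs map to None)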
233,195
turicas/rows
rows/plugins/plugin_pdf.py
ExtractionAlgorithm.selected_objects
def selected_objects(self): """Filter out objects outside table boundaries""" return [ obj for obj in self.text_objects if contains_or_overlap(self.table_bbox, obj.bbox) ]
python
def selected_objects(self): """Filter out objects outside table boundaries""" return [ obj for obj in self.text_objects if contains_or_overlap(self.table_bbox, obj.bbox) ]
[ "def", "selected_objects", "(", "self", ")", ":", "return", "[", "obj", "for", "obj", "in", "self", ".", "text_objects", "if", "contains_or_overlap", "(", "self", ".", "table_bbox", ",", "obj", ".", "bbox", ")", "]" ]
Filter out objects outside table boundaries
[ "Filter", "out", "objects", "outside", "table", "boundaries" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_pdf.py#L446-L453
233,196
turicas/rows
examples/library/extract_links.py
transform
def transform(row, table): 'Extract links from "project" field and remove HTML from all' data = row._asdict() data["links"] = " ".join(extract_links(row.project)) for key, value in data.items(): if isinstance(value, six.text_type): data[key] = extract_text(value) return data
python
def transform(row, table): 'Extract links from "project" field and remove HTML from all' data = row._asdict() data["links"] = " ".join(extract_links(row.project)) for key, value in data.items(): if isinstance(value, six.text_type): data[key] = extract_text(value) return data
[ "def", "transform", "(", "row", ",", "table", ")", ":", "data", "=", "row", ".", "_asdict", "(", ")", "data", "[", "\"links\"", "]", "=", "\" \"", ".", "join", "(", "extract_links", "(", "row", ".", "project", ")", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "data", "[", "key", "]", "=", "extract_text", "(", "value", ")", "return", "data" ]
Extract links from "project" field and remove HTML from all
[ "Extract", "links", "from", "project", "field", "and", "remove", "HTML", "from", "all" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/extract_links.py#L24-L32
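A hypothetical sketch of wiring the `transform` function above into `rows.transform`, which applies it row by row; the extra `links` field and the `table` variable are assumptions taken from the surrounding example script:

import rows

new_fields = table.fields.copy()
new_fields["links"] = rows.fields.TextField  # the column that transform() adds
clean_table = rows.transform(new_fields, transform, table)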
233,197
turicas/rows
examples/library/brazilian_cities_wikipedia.py
transform
def transform(row, table): 'Transform row "link" into full URL and add "state" based on "name"' data = row._asdict() data["link"] = urljoin("https://pt.wikipedia.org", data["link"]) data["name"], data["state"] = regexp_city_state.findall(data["name"])[0] return data
python
def transform(row, table): 'Transform row "link" into full URL and add "state" based on "name"' data = row._asdict() data["link"] = urljoin("https://pt.wikipedia.org", data["link"]) data["name"], data["state"] = regexp_city_state.findall(data["name"])[0] return data
[ "def", "transform", "(", "row", ",", "table", ")", ":", "data", "=", "row", ".", "_asdict", "(", ")", "data", "[", "\"link\"", "]", "=", "urljoin", "(", "\"https://pt.wikipedia.org\"", ",", "data", "[", "\"link\"", "]", ")", "data", "[", "\"name\"", "]", ",", "data", "[", "\"state\"", "]", "=", "regexp_city_state", ".", "findall", "(", "data", "[", "\"name\"", "]", ")", "[", "0", "]", "return", "data" ]
Transform row "link" into full URL and add "state" based on "name"
[ "Transform", "row", "link", "into", "full", "URL", "and", "add", "state", "based", "on", "name" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/brazilian_cities_wikipedia.py#L34-L40
233,198
turicas/rows
rows/plugins/plugin_parquet.py
import_from_parquet
def import_from_parquet(filename_or_fobj, *args, **kwargs): """Import data from a Parquet file and return a rows.Table.""" source = Source.from_file(filename_or_fobj, plugin_name="parquet", mode="rb") # TODO: should look into `schema.converted_type` also types = OrderedDict( [ (schema.name, PARQUET_TO_ROWS[schema.type]) for schema in parquet._read_footer(source.fobj).schema if schema.type is not None ] ) header = list(types.keys()) table_rows = list(parquet.reader(source.fobj)) # TODO: be lazy meta = {"imported_from": "parquet", "source": source} return create_table( [header] + table_rows, meta=meta, force_types=types, *args, **kwargs )
python
def import_from_parquet(filename_or_fobj, *args, **kwargs): """Import data from a Parquet file and return a rows.Table.""" source = Source.from_file(filename_or_fobj, plugin_name="parquet", mode="rb") # TODO: should look into `schema.converted_type` also types = OrderedDict( [ (schema.name, PARQUET_TO_ROWS[schema.type]) for schema in parquet._read_footer(source.fobj).schema if schema.type is not None ] ) header = list(types.keys()) table_rows = list(parquet.reader(source.fobj)) # TODO: be lazy meta = {"imported_from": "parquet", "source": source} return create_table( [header] + table_rows, meta=meta, force_types=types, *args, **kwargs )
[ "def", "import_from_parquet", "(", "filename_or_fobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "plugin_name", "=", "\"parquet\"", ",", "mode", "=", "\"rb\"", ")", "# TODO: should look into `schema.converted_type` also", "types", "=", "OrderedDict", "(", "[", "(", "schema", ".", "name", ",", "PARQUET_TO_ROWS", "[", "schema", ".", "type", "]", ")", "for", "schema", "in", "parquet", ".", "_read_footer", "(", "source", ".", "fobj", ")", ".", "schema", "if", "schema", ".", "type", "is", "not", "None", "]", ")", "header", "=", "list", "(", "types", ".", "keys", "(", ")", ")", "table_rows", "=", "list", "(", "parquet", ".", "reader", "(", "source", ".", "fobj", ")", ")", "# TODO: be lazy", "meta", "=", "{", "\"imported_from\"", ":", "\"parquet\"", ",", "\"source\"", ":", "source", "}", "return", "create_table", "(", "[", "header", "]", "+", "table_rows", ",", "meta", "=", "meta", ",", "force_types", "=", "types", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Import data from a Parquet file and return a rows.Table.
[ "Import", "data", "from", "a", "Parquet", "file", "and", "return", "with", "rows", ".", "Table", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_parquet.py#L47-L65
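Usage sketch, assuming the plugin is re-exported at package level (as other rows plugins are) and that the `parquet` dependency is installed:

import rows

table = rows.import_from_parquet("data.parquet")  # hypothetical file
for row in table:
    print(row)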
233,199
turicas/rows
rows/plugins/dicts.py
import_from_dicts
def import_from_dicts(data, samples=None, *args, **kwargs): """Import data from an iterable of dicts The algorithm will use the first `samples` `dict`s to determine the field names (if `samples` is `None` all `dict`s will be used). """ data = iter(data) cached_rows, headers = [], [] for index, row in enumerate(data, start=1): cached_rows.append(row) for key in row.keys(): if key not in headers: headers.append(key) if samples and index == samples: break data_rows = ( [row.get(header, None) for header in headers] for row in chain(cached_rows, data) ) kwargs["samples"] = samples meta = {"imported_from": "dicts"} return create_table(chain([headers], data_rows), meta=meta, *args, **kwargs)
python
def import_from_dicts(data, samples=None, *args, **kwargs): """Import data from an iterable of dicts The algorithm will use the first `samples` `dict`s to determine the field names (if `samples` is `None` all `dict`s will be used). """ data = iter(data) cached_rows, headers = [], [] for index, row in enumerate(data, start=1): cached_rows.append(row) for key in row.keys(): if key not in headers: headers.append(key) if samples and index == samples: break data_rows = ( [row.get(header, None) for header in headers] for row in chain(cached_rows, data) ) kwargs["samples"] = samples meta = {"imported_from": "dicts"} return create_table(chain([headers], data_rows), meta=meta, *args, **kwargs)
[ "def", "import_from_dicts", "(", "data", ",", "samples", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "iter", "(", "data", ")", "cached_rows", ",", "headers", "=", "[", "]", ",", "[", "]", "for", "index", ",", "row", "in", "enumerate", "(", "data", ",", "start", "=", "1", ")", ":", "cached_rows", ".", "append", "(", "row", ")", "for", "key", "in", "row", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "headers", ":", "headers", ".", "append", "(", "key", ")", "if", "samples", "and", "index", "==", "samples", ":", "break", "data_rows", "=", "(", "[", "row", ".", "get", "(", "header", ",", "None", ")", "for", "header", "in", "headers", "]", "for", "row", "in", "chain", "(", "cached_rows", ",", "data", ")", ")", "kwargs", "[", "\"samples\"", "]", "=", "samples", "meta", "=", "{", "\"imported_from\"", ":", "\"dicts\"", "}", "return", "create_table", "(", "chain", "(", "[", "headers", "]", ",", "data_rows", ")", ",", "meta", "=", "meta", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Import data from an iterable of dicts The algorithm will use the first `samples` `dict`s to determine the field names (if `samples` is `None` all `dict`s will be used).
[ "Import", "data", "from", "a", "iterable", "of", "dicts" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/dicts.py#L25-L52
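A final sketch for `import_from_dicts`: with `samples=None` every dict is scanned, so a key that first appears in a later dict still widens the header, and missing values come back as `None`:

import rows  # assuming the plugin is re-exported as rows.import_from_dicts

data = [
    {"name": "Brasília", "population": 3055149},
    {"name": "São Paulo", "population": 12252023, "state": "SP"},  # extra key
]
table = rows.import_from_dicts(data)
table.field_names  # -> ['name', 'population', 'state']; the first row's 'state' is None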