_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def do_types_overlap(schema, type_a, type_b):
    """Check whether two composite types overlap in the given schema.

    Two composite types overlap when the sets of possible concrete types
    for each intersect.  This is often used to determine whether a fragment
    of one type could possibly be visited in the context of another type.

    The check is commutative.
    """
    # Identical types trivially overlap.
    if type_a is type_b:
        return True
    if is_abstract_type(type_a):
        if not is_abstract_type(type_b):
            # Is the concrete latter type a possible type of the former?
            return schema.is_possible_type(type_a, type_b)
        # Both abstract: overlap iff their possible concrete types intersect.
        return any(
            schema.is_possible_type(type_b, possible)
            for possible in schema.get_possible_types(type_a)
        )
    if is_abstract_type(type_b):
        # Is the concrete former type a possible type of the latter?
        return schema.is_possible_type(type_b, type_a)
    # Two distinct concrete types never overlap.
    return False
def concat_ast(asts: Sequence[DocumentNode]) -> DocumentNode:
    """Concatenate several ASTs into one batched AST.

    Given a collection of ASTs, presumably each from a different file,
    merge them into a single document.  This is useful for validating many
    GraphQL source files which together represent one conceptual application.
    """
    all_definitions: list = []
    for document in asts:
        all_definitions.extend(document.definitions)
    return DocumentNode(definitions=all_definitions)
def suggestion_list(input_: str, options: Collection[str]):
    """Get a list of suggestions for a given (invalid) input string.

    Given an invalid input string and a collection of valid options, return
    the options whose lexical distance from the input lies within a
    threshold, ordered from most to least similar.
    """
    distances = {}
    half_input_len = len(input_) // 2
    for candidate in options:
        dist = lexical_distance(input_, candidate)
        # Tolerate up to half the length of the longer string worth of
        # edits, but always at least one edit.
        if dist <= max(half_input_len, len(candidate) // 2, 1):
            distances[candidate] = dist
    return sorted(distances, key=distances.get)
def lexical_distance(a_str: str, b_str: str) -> int:
    """Compute the lexical distance between strings A and B.

    The "distance" is the minimum number of edits needed to transform one
    string into the other, where an edit is an insertion, deletion or
    substitution of a single character, or a swap of two adjacent
    characters.  This is useful for detecting typos in input or sorting.
    """
    if a_str == b_str:
        return 0
    a, b = a_str.lower(), b_str.lower()
    a_len, b_len = len(a), len(b)
    # Any pure case change counts as a single edit.
    if a == b:
        return 1
    # rows[i][j] holds the distance between a[:i] and b[:j].
    rows = [list(range(b_len + 1))]
    for i in range(1, a_len + 1):
        rows.append([i] + [0] * b_len)
    for i in range(1, a_len + 1):
        for j in range(1, b_len + 1):
            cost = int(a[i - 1] != b[j - 1])
            best = min(
                rows[i - 1][j] + 1,  # deletion
                rows[i][j - 1] + 1,  # insertion
                rows[i - 1][j - 1] + cost,  # substitution
            )
            # Swap of two adjacent characters.
            if i > 1 and j > 1 and a[i - 1] == b[j - 2] and a[i - 2] == b[j - 1]:
                best = min(best, rows[i - 2][j - 2] + cost)
            rows[i][j] = best
    return rows[a_len][b_len]
def get_operation_ast(
    document_ast: DocumentNode, operation_name: Optional[str] = None
) -> Optional[OperationDefinitionNode]:
    """Get the operation AST node for a document.

    Returns an operation AST given a document AST and optionally an
    operation name.  If a name is not provided, an operation is only
    returned if the document defines exactly one operation.
    """
    found: Optional[OperationDefinitionNode] = None
    for definition in document_ast.definitions:
        if not isinstance(definition, OperationDefinitionNode):
            continue
        if operation_name:
            if definition.name and definition.name.value == operation_name:
                return definition
        else:
            # Without a name, bail out as soon as a second operation
            # is encountered.
            if found:
                return None
            found = definition
    return found
def format_error(error: "GraphQLError") -> dict:
    """Format a GraphQL error into a plain dict.

    Given a GraphQLError, format it according to the rules described by the
    "Response Format, Errors" section of the GraphQL Specification.
    """
    if not error:
        raise ValueError("Received null or undefined error.")
    formatted: Dict[str, Any] = {  # noqa: E701 (pycqa/flake8#394)
        "message": error.message or "An unknown error occurred.",
        "locations": error.locations,
        "path": error.path,
    }
    # Extensions are only included when present.
    if error.extensions:
        formatted["extensions"] = error.extensions
    return formatted
async def graphql(
    schema: GraphQLSchema,
    source: Union[str, Source],
    root_value: Any = None,
    context_value: Any = None,
    variable_values: Dict[str, Any] = None,
    operation_name: str = None,
    field_resolver: GraphQLFieldResolver = None,
    type_resolver: GraphQLTypeResolver = None,
    middleware: Middleware = None,
    execution_context_class: Type[ExecutionContext] = ExecutionContext,
) -> ExecutionResult:
    """Execute a GraphQL operation asynchronously.

    This is the primary entry point function for fulfilling GraphQL
    operations by parsing, validating, and executing a GraphQL document
    alongside a GraphQL schema.  More sophisticated GraphQL servers, such as
    those which persist queries, may wish to separate the validation and
    execution phases into a static time tooling step and a server runtime
    step.

    Accepts the following arguments:

    :arg schema:
        The GraphQL type system to use when validating and executing a query.
    :arg source:
        A GraphQL language formatted string representing the requested
        operation.
    :arg root_value:
        The value provided as the first argument to resolver functions on
        the top level type (e.g. the query object type).
    :arg context_value:
        The context value is provided as an attribute of the second argument
        (the resolve info) to resolver functions.  It is used to pass shared
        information useful at any point during query execution, for example
        the currently logged in user and connections to databases or other
        services.
    :arg variable_values:
        A mapping of variable name to runtime value to use for all variables
        defined in the request string.
    :arg operation_name:
        The name of the operation to use if the request string contains
        multiple possible operations.  Can be omitted if the request string
        contains only one operation.
    :arg field_resolver:
        A resolver function to use when one is not provided by the schema.
        If not provided, the default field resolver is used (which looks for
        a value or method on the source value with the field's name).
    :arg type_resolver:
        A type resolver function to use when none is provided by the schema.
        If not provided, the default type resolver is used (which looks for
        a `__typename` field or alternatively calls the `isTypeOf` method).
    :arg middleware:
        The middleware to wrap the resolvers with.
    :arg execution_context_class:
        The execution context class to use to build the context.
    """
    result = graphql_impl(
        schema,
        source,
        root_value,
        context_value,
        variable_values,
        operation_name,
        field_resolver,
        type_resolver,
        middleware,
        execution_context_class,
    )
    # Always return asynchronously for a consistent API.
    if not isawaitable(result):
        return cast(ExecutionResult, result)
    return await cast(Awaitable[ExecutionResult], result)
def graphql_impl(
    schema,
    source,
    root_value,
    context_value,
    variable_values,
    operation_name,
    field_resolver,
    type_resolver,
    middleware,
    execution_context_class,
) -> AwaitableOrValue[ExecutionResult]:
    """Execute a query, returning an awaitable only when necessary."""
    # Phase 1: validate the schema itself.
    schema_validation_errors = validate_schema(schema)
    if schema_validation_errors:
        return ExecutionResult(data=None, errors=schema_validation_errors)
    # Phase 2: parse the source document.
    try:
        document = parse(source)
    except GraphQLError as error:
        return ExecutionResult(data=None, errors=[error])
    except Exception as error:
        # Wrap unexpected parser failures in a GraphQLError.
        error = GraphQLError(str(error), original_error=error)
        return ExecutionResult(data=None, errors=[error])
    # Phase 3: validate the document against the schema.
    from .validation import validate

    validation_errors = validate(schema, document)
    if validation_errors:
        return ExecutionResult(data=None, errors=validation_errors)
    # Phase 4: execute the validated document.
    return execute(
        schema,
        document,
        root_value,
        context_value,
        variable_values,
        operation_name,
        field_resolver,
        type_resolver,
        middleware,
        execution_context_class,
    )
def get_named_type(type_):  # noqa: F811
    """Unwrap all wrapping types and return the underlying named type."""
    if not type_:
        return None
    unwrapped = type_
    # Peel off list/non-null wrappers until a named type remains.
    while is_wrapping_type(unwrapped):
        unwrapped = cast(GraphQLWrappingType, unwrapped).of_type
    return cast(GraphQLNamedType, unwrapped)
def get_nullable_type(type_):  # noqa: F811
    """Unwrap a possible non-null wrapper and return the nullable type."""
    if is_non_null_type(type_):
        type_ = cast(GraphQLNonNull, type_).of_type
    return cast(Optional[GraphQLNullableType], type_)
def fields(self) -> GraphQLFieldMap:
    """Get the provided fields, wrapping plain output types as GraphQLField."""
    try:
        field_map = resolve_thunk(self._fields)
    except GraphQLError:
        raise
    except Exception as error:
        raise TypeError(f"{self.name} fields cannot be resolved: {error}")
    # The resolved value must be a dict keyed by field name.
    if not isinstance(field_map, dict) or not all(
        isinstance(name, str) for name in field_map
    ):
        raise TypeError(
            f"{self.name} fields must be a dict with field names as keys"
            " or a function which returns such an object."
        )
    if not all(
        isinstance(field, GraphQLField) or is_output_type(field)
        for field in field_map.values()
    ):
        raise TypeError(
            f"{self.name} fields must be GraphQLField or output type objects."
        )
    # Wrap bare output types in a GraphQLField.
    return {
        name: field if isinstance(field, GraphQLField) else GraphQLField(field)
        for name, field in field_map.items()
    }
def interfaces(self) -> GraphQLInterfaceList:
    """Get the provided interfaces."""
    try:
        resolved = resolve_thunk(self._interfaces)
    except GraphQLError:
        raise
    except Exception as error:
        raise TypeError(f"{self.name} interfaces cannot be resolved: {error}")
    if resolved is None:
        resolved = []
    if not isinstance(resolved, (list, tuple)):
        raise TypeError(
            f"{self.name} interfaces must be a list/tuple"
            " or a function which returns a list/tuple."
        )
    if not all(isinstance(item, GraphQLInterfaceType) for item in resolved):
        raise TypeError(f"{self.name} interfaces must be GraphQLInterface objects.")
    # Return a shallow copy so callers cannot mutate the resolved value.
    return resolved[:]
def types(self) -> GraphQLTypeList:
    """Get the provided member types."""
    try:
        resolved = resolve_thunk(self._types)
    except GraphQLError:
        raise
    except Exception as error:
        raise TypeError(f"{self.name} types cannot be resolved: {error}")
    if resolved is None:
        resolved = []
    if not isinstance(resolved, (list, tuple)):
        raise TypeError(
            f"{self.name} types must be a list/tuple"
            " or a function which returns a list/tuple."
        )
    if not all(isinstance(item, GraphQLObjectType) for item in resolved):
        raise TypeError(f"{self.name} types must be GraphQLObjectType objects.")
    # Return a shallow copy so callers cannot mutate the resolved value.
    return resolved[:]
def fields(self) -> GraphQLInputFieldMap:
    """Get the provided fields, wrapping plain input types as GraphQLInputField."""
    try:
        field_map = resolve_thunk(self._fields)
    except GraphQLError:
        raise
    except Exception as error:
        raise TypeError(f"{self.name} fields cannot be resolved: {error}")
    # The resolved value must be a dict keyed by field name.
    if not isinstance(field_map, dict) or not all(
        isinstance(name, str) for name in field_map
    ):
        raise TypeError(
            f"{self.name} fields must be a dict with field names as keys"
            " or a function which returns such an object."
        )
    if not all(
        isinstance(field, GraphQLInputField) or is_input_type(field)
        for field in field_map.values()
    ):
        raise TypeError(
            f"{self.name} fields must be"
            " GraphQLInputField or input type objects."
        )
    # Wrap bare input types in a GraphQLInputField.
    return {
        name: field
        if isinstance(field, GraphQLInputField)
        else GraphQLInputField(field)
        for name, field in field_map.items()
    }
def located_error(
    original_error: Union[Exception, GraphQLError],
    nodes: Sequence["Node"],
    path: Sequence[Union[str, int]],
) -> GraphQLError:
    """Produce a GraphQL error located in the document.

    Given an arbitrary Error, presumably thrown while attempting to execute
    a GraphQL operation, produce a new GraphQLError aware of the location in
    the document responsible for the original Error.
    """
    if original_error:
        # Note: this uses a brand-check (duck typing) to support GraphQL
        # errors originating from other contexts.
        if isinstance(getattr(original_error, "path", None), list):
            return original_error  # type: ignore
    _undefined = object()  # sentinel: attribute genuinely missing
    message = getattr(original_error, "message", _undefined)
    if message is _undefined:
        message = str(original_error)
    source = getattr(original_error, "source", None)
    positions = getattr(original_error, "positions", None)
    # Prefer the original error's nodes when it carries a truthy value.
    original_nodes = getattr(original_error, "nodes", None)
    if original_nodes:
        nodes = original_nodes
    return GraphQLError(message, nodes, source, positions, path, original_error)
def assert_valid_name(name: str) -> str:
    """Uphold the spec rules about naming, returning the name if valid."""
    error = is_valid_name_error(name)
    if error is not None:
        raise error
    return name
def is_valid_name_error(name: str, node: Node = None) -> Optional[GraphQLError]:
    """Return a GraphQLError if the given name is invalid, else None."""
    if not isinstance(name, str):
        raise TypeError("Expected string")
    # Double underscore prefixes are reserved for introspection.
    if name.startswith("__"):
        return GraphQLError(
            f"Name {name!r} must not begin with '__',"
            " which is reserved by GraphQL introspection.",
            node,
        )
    if re_name.match(name):
        return None
    return GraphQLError(
        f"Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but {name!r} does not.", node
    )
async def subscribe(
    schema: GraphQLSchema,
    document: DocumentNode,
    root_value: Any = None,
    context_value: Any = None,
    variable_values: Dict[str, Any] = None,
    operation_name: str = None,
    field_resolver: GraphQLFieldResolver = None,
    subscribe_field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterator[ExecutionResult], ExecutionResult]:
    """Create a GraphQL subscription.

    Implements the "Subscribe" algorithm described in the GraphQL spec.

    Returns a coroutine object which yields either an AsyncIterator (if
    successful) or an ExecutionResult (client error).  The coroutine will
    raise an exception if a server error occurs.

    If the client-provided arguments do not result in a compliant
    subscription, a GraphQL Response (ExecutionResult) with descriptive
    errors and no data will be returned.

    If the source stream could not be created due to faulty subscription
    resolver logic or underlying systems, a single ExecutionResult
    containing ``errors`` and no ``data`` is yielded.

    If the operation succeeded, an AsyncIterator is yielded which produces a
    stream of ExecutionResults representing the response stream.
    """
    try:
        result_or_stream = await create_source_event_stream(
            schema,
            document,
            root_value,
            context_value,
            variable_values,
            operation_name,
            subscribe_field_resolver,
        )
    except GraphQLError as error:
        # Client errors are reported in the result rather than raised.
        return ExecutionResult(data=None, errors=[error])
    if isinstance(result_or_stream, ExecutionResult):
        return result_or_stream
    source_stream = cast(AsyncIterable, result_or_stream)

    async def map_source_to_response(payload):
        """Map a single source payload to a response.

        Each payload yielded from the subscription is mapped over the normal
        GraphQL ``execute`` function, with ``payload`` as the ``root_value``.
        This implements the "MapSourceToResponseEvent" algorithm described
        in the GraphQL specification.  The ``execute`` function provides the
        "ExecuteSubscriptionEvent" algorithm, which is nearly identical to
        the "ExecuteQuery" algorithm (for which ``execute`` is also used).
        """
        result = execute(
            schema,
            document,
            payload,
            context_value,
            variable_values,
            operation_name,
            field_resolver,
        )
        if isawaitable(result):
            return await result
        return result

    return MapAsyncIterator(source_stream, map_source_to_response)
async def create_source_event_stream(
    schema: GraphQLSchema,
    document: DocumentNode,
    root_value: Any = None,
    context_value: Any = None,
    variable_values: Dict[str, Any] = None,
    operation_name: str = None,
    field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterable[Any], ExecutionResult]:
    """Create a source event stream.

    Implements the "CreateSourceEventStream" algorithm described in the
    GraphQL specification, resolving the subscription source event stream.

    Returns a coroutine that yields an AsyncIterable.

    If the client provided invalid arguments, the source stream could not
    be created, or the resolver did not return an AsyncIterable, this
    function will throw an error, which should be caught and handled by the
    caller.

    A Source Event Stream represents a sequence of events, each of which
    triggers a GraphQL execution for that event.  This may be useful when
    hosting the stateful subscription service in a different process or
    machine than the stateless GraphQL execution engine, or otherwise
    separating these two steps.  For more on this, see the "Supporting
    Subscriptions at Scale" information in the GraphQL spec.
    """
    # Missing or incorrectly typed arguments are an internal developer
    # mistake, so raise an early error.
    assert_valid_execution_arguments(schema, document, variable_values)
    # Building the context raises if a valid one cannot be created from the
    # given arguments.
    context = ExecutionContext.build(
        schema,
        document,
        root_value,
        context_value,
        variable_values,
        operation_name,
        field_resolver,
    )
    # A list of errors means the execution context could not be built.
    if isinstance(context, list):
        return ExecutionResult(data=None, errors=context)
    root_type = get_operation_root_type(schema, context.operation)
    fields = context.collect_fields(
        root_type, context.operation.selection_set, {}, set()
    )
    # Subscriptions have a single root field; take the first response name.
    response_name = next(iter(fields))
    field_nodes = fields[response_name]
    field_node = field_nodes[0]
    field_name = field_node.name.value
    field_def = get_field_def(schema, root_type, field_name)
    if not field_def:
        raise GraphQLError(
            f"The subscription field '{field_name}' is not defined.", field_nodes
        )
    # Call the `subscribe()` resolver or the default resolver to produce an
    # AsyncIterable yielding raw payloads.
    resolve_fn = field_def.subscribe or context.field_resolver
    resolve_fn = cast(GraphQLFieldResolver, resolve_fn)  # help mypy
    path = add_path(None, response_name)
    info = context.build_resolve_info(field_def, field_nodes, root_type, path)
    # `resolve_field_value_or_error` implements the "ResolveFieldEventStream"
    # algorithm from the GraphQL specification.  It differs from
    # `resolve_field_value` by being given a different `resolve_fn`.
    result = context.resolve_field_value_or_error(
        field_def, field_nodes, resolve_fn, root_value, info
    )
    event_stream = await cast(Awaitable, result) if isawaitable(result) else result
    # If the resolver produced an exception, rethrow it as a located error.
    if isinstance(event_stream, Exception):
        raise located_error(event_stream, field_nodes, response_path_as_list(path))
    # Assert the field returned an event stream, otherwise raise an error.
    if isinstance(event_stream, AsyncIterable):
        return cast(AsyncIterable, event_stream)
    raise TypeError(
        f"Subscription field must return AsyncIterable. Received: {event_stream!r}"
    )
def get_visit_fn(cls, kind, is_leaving=False) -> Callable:
    """Return the visit function for the given node kind and direction."""
    direction = "leave" if is_leaving else "enter"
    # Prefer a kind-specific method (e.g. "enter_field"), falling back to
    # the generic "enter"/"leave" method if none is defined.
    specific_fn = getattr(cls, f"{direction}_{kind}", None)
    if specific_fn:
        return specific_fn
    return getattr(cls, direction, None)
def validate_schema(schema: GraphQLSchema) -> List[GraphQLError]:
    """Validate a GraphQL schema.

    Implements the "Type Validation" sub-sections of the specification's
    "Type System" section.

    Validation runs synchronously, returning a list of encountered errors,
    or an empty list if no errors were encountered and the schema is valid.
    """
    # First ensure the provided value is in fact a GraphQLSchema.
    assert_schema(schema)
    # If this schema has already been validated, return the cached results.
    # noinspection PyProtectedMember
    errors = schema._validation_errors
    if errors is not None:
        return errors
    # Validate the schema, producing a list of errors.
    context = SchemaValidationContext(schema)
    context.validate_root_types()
    context.validate_directives()
    context.validate_types()
    # Persist the results before returning, so validation does not run
    # multiple times for this schema.
    errors = context.errors
    schema._validation_errors = errors
    return errors
def assert_valid_schema(schema: GraphQLSchema) -> None:
    """Assert that a schema is valid.

    Throws a TypeError listing all validation messages if the schema is
    invalid.
    """
    errors = validate_schema(schema)
    if errors:
        messages = [error.message for error in errors]
        raise TypeError("\n\n".join(messages))
def separate_operations(document_ast: DocumentNode) -> Dict[str, DocumentNode]:
    """Separate operations in a given AST document.

    Accepts a single AST document which may contain many operations and
    fragments, and returns a mapping from operation name to an AST document
    containing just that operation and the fragment definitions it refers
    to.
    """
    # Populate metadata and build a dependency graph.
    visitor = SeparateOperations()
    visit(document_ast, visitor)
    operations = visitor.operations
    fragments = visitor.fragments
    positions = visitor.positions
    dep_graph = visitor.dep_graph
    # Synthesize a new AST per operation containing only what that
    # operation needs.
    separated: Dict[str, DocumentNode] = {}
    for operation in operations:
        operation_name = op_name(operation)
        dependencies: Set[str] = set()
        collect_transitive_dependencies(dependencies, dep_graph, operation_name)
        # Include the operation itself plus all fragments it (transitively)
        # depends on, sorted back into original document order.
        definitions: List[ExecutableDefinitionNode] = [operation]
        definitions.extend(fragments[frag_name] for frag_name in dependencies)
        definitions.sort(key=lambda node: positions.get(node, 0))
        separated[operation_name] = DocumentNode(definitions=definitions)
    return separated
def collect_transitive_dependencies(
    collected: Set[str], dep_graph: DepGraph, from_name: str
) -> None:
    """Collect transitive dependencies into the given set.

    Recurses through the dependency graph, adding to ``collected`` every
    name reachable from ``from_name``.
    """
    for to_name in dep_graph[from_name]:
        if to_name in collected:
            continue
        collected.add(to_name)
        collect_transitive_dependencies(collected, dep_graph, to_name)
def ast_from_value(value: Any, type_: GraphQLInputType) -> Optional[ValueNode]:
    """Produce a GraphQL Value AST given a Python value.

    A GraphQL input type must be provided, which will be used to interpret
    different Python values:

    | JSON Value | GraphQL Value       |
    | ---------- | ------------------- |
    | Object     | Input Object        |
    | Array      | List                |
    | Boolean    | Boolean             |
    | String     | String / Enum Value |
    | Number     | Int / Float         |
    | Mixed      | Enum Value          |
    | null       | NullValue           |
    """
    if is_non_null_type(type_):
        # Strip the non-null wrapper; an explicit null is invalid here.
        inner_node = ast_from_value(value, cast(GraphQLNonNull, type_).of_type)
        if isinstance(inner_node, NullValueNode):
            return None
        return inner_node
    # Only an explicit None maps to null (not INVALID or NaN).
    if value is None:
        return NullValueNode()
    # INVALID or NaN cannot be represented.
    if is_invalid(value):
        return None
    if is_list_type(type_):
        # Convert a Python iterable to a GraphQL list.  If the GraphQL type
        # is a list but the value is not iterable, convert the value using
        # the list's item type instead.
        item_type = cast(GraphQLList, type_).of_type
        if isinstance(value, Iterable) and not isinstance(value, str):
            return ListValueNode(
                values=[
                    ast_from_value(item, item_type) for item in value  # type: ignore
                ]
            )
        return ast_from_value(value, item_type)  # type: ignore
    if is_input_object_type(type_):
        # Populate the fields of the input object by creating ASTs for each
        # value in the Python mapping according to the input type's fields.
        if value is None or not isinstance(value, Mapping):
            return None
        type_ = cast(GraphQLInputObjectType, type_)
        object_fields: List[ObjectFieldNode] = []
        for field_name, field in type_.fields.items():
            if field_name not in value:
                continue
            field_value = ast_from_value(value[field_name], field.type)
            if field_value:
                object_fields.append(
                    ObjectFieldNode(
                        name=NameNode(value=field_name), value=field_value
                    )
                )
        return ObjectValueNode(fields=object_fields)
    if is_leaf_type(type_):
        # Since value is an internally represented value, it must be
        # serialized to an externally represented value before converting
        # into an AST.
        serialized = type_.serialize(value)  # type: ignore
        if is_nullish(serialized):
            return None
        # Serialize based on the corresponding Python scalar type.
        # (bool must be checked before int, as bool is a subclass of int.)
        if isinstance(serialized, bool):
            return BooleanValueNode(value=serialized)
        # Python ints and floats correspond nicely to Int and Float values.
        if isinstance(serialized, int):
            return IntValueNode(value=f"{serialized:d}")
        if isinstance(serialized, float):
            return FloatValueNode(value=f"{serialized:g}")
        if isinstance(serialized, str):
            # Enum types use Enum literals.
            if is_enum_type(type_):
                return EnumValueNode(value=serialized)
            # ID types can use Int literals.
            if type_ is GraphQLID and _re_integer_string.match(serialized):
                return IntValueNode(value=serialized)
            return StringValueNode(value=serialized)
        raise TypeError(f"Cannot convert value to AST: {inspect(serialized)}")
    # Not reachable: all possible input types have been considered.
    raise TypeError(f"Unexpected input type: '{inspect(type_)}'.")
def get_variable_values(
    schema: GraphQLSchema,
    var_def_nodes: List[VariableDefinitionNode],
    inputs: Dict[str, Any],
) -> CoercedVariableValues:
    """Get coerced variable values based on the provided definitions.

    Prepares a dict of variable values of the correct type based on the
    provided variable definitions and arbitrary input.  If the input cannot
    be parsed to match the variable definitions, the errors are reported in
    the returned CoercedVariableValues.
    """
    errors: List[GraphQLError] = []
    coerced_values: Dict[str, Any] = {}
    for var_def_node in var_def_nodes:
        var_name = var_def_node.variable.name.value
        var_type = type_from_ast(schema, var_def_node.type)
        if not is_input_type(var_type):
            # Variables must use input types.  This should be caught during
            # validation, but is checked again here for safety.
            errors.append(
                GraphQLError(
                    f"Variable '${var_name}' expected value of type"
                    f" '{print_ast(var_def_node.type)}'"
                    " which cannot be used as an input type.",
                    var_def_node.type,
                )
            )
            continue
        var_type = cast(GraphQLInputType, var_type)
        has_value = var_name in inputs
        value = inputs[var_name] if has_value else INVALID
        if not has_value and var_def_node.default_value:
            # No value was provided and the variable has a default value:
            # use the default.
            coerced_values[var_name] = value_from_ast(
                var_def_node.default_value, var_type
            )
        elif (not has_value or value is None) and is_non_null_type(var_type):
            # Non-null variables must be provided and must not be null.
            errors.append(
                GraphQLError(
                    f"Variable '${var_name}' of non-null type"
                    f" '{var_type}' must not be null."
                    if has_value
                    else f"Variable '${var_name}' of required type"
                    f" '{var_type}' was not provided.",
                    var_def_node,
                )
            )
        elif has_value:
            if value is None:
                # An explicit None is kept as the coerced value None.
                coerced_values[var_name] = None
            else:
                # A non-null value was provided: coerce it to the expected
                # type, or report an error if coercion fails.
                coerced = coerce_value(value, var_type, var_def_node)
                coercion_errors = coerced.errors
                if coercion_errors:
                    for error in coercion_errors:
                        error.message = (
                            f"Variable '${var_name}' got invalid"
                            f" value {inspect(value)}; {error.message}"
                        )
                    errors.extend(coercion_errors)
                else:
                    coerced_values[var_name] = coerced.value
    if errors:
        return CoercedVariableValues(errors, None)
    return CoercedVariableValues(None, coerced_values)
def get_argument_values(
    type_def: Union[GraphQLField, GraphQLDirective],
    node: Union[FieldNode, DirectiveNode],
    variable_values: Dict[str, Any] = None,
) -> Dict[str, Any]:
    """Get coerced argument values based on provided definitions and nodes.

    Prepares a dict of argument values given a list of argument definitions
    and a list of argument AST nodes.
    """
    coerced_values: Dict[str, Any] = {}
    arg_defs = type_def.args
    arg_nodes = node.arguments
    if not arg_defs or arg_nodes is None:
        return coerced_values
    arg_node_map = {arg.name.value: arg for arg in arg_nodes}
    for name, arg_def in arg_defs.items():
        arg_type = arg_def.type
        argument_node = cast(ArgumentNode, arg_node_map.get(name))
        variable_values = cast(Dict[str, Any], variable_values)
        if argument_node and isinstance(argument_node.value, VariableNode):
            # The argument refers to a variable: look up its runtime value.
            variable_name = argument_node.value.name.value
            has_value = variable_values and variable_name in variable_values
            is_null = has_value and variable_values[variable_name] is None
        else:
            has_value = argument_node is not None
            is_null = has_value and isinstance(argument_node.value, NullValueNode)
        if not has_value and arg_def.default_value is not INVALID:
            # No argument was provided where the definition has a default
            # value: use the default.
            coerced_values[name] = arg_def.default_value
        elif (not has_value or is_null) and is_non_null_type(arg_type):
            # Missing or null argument for a non-null (required) type:
            # produce a field error.
            if is_null:
                raise GraphQLError(
                    f"Argument '{name}' of non-null type"
                    f" '{arg_type}' must not be null.",
                    argument_node.value,
                )
            if argument_node and isinstance(argument_node.value, VariableNode):
                raise GraphQLError(
                    f"Argument '{name}' of required type"
                    f" '{arg_type}' was provided the variable"
                    f" '${variable_name}'"
                    " which was not provided a runtime value.",
                    argument_node.value,
                )
            raise GraphQLError(
                f"Argument '{name}' of required type '{arg_type}'"
                " was not provided.",
                node,
            )
        elif has_value:
            if isinstance(argument_node.value, NullValueNode):
                # An explicit null is kept as the coerced value None.
                coerced_values[name] = None
            elif isinstance(argument_node.value, VariableNode):
                variable_name = argument_node.value.name.value
                # Note: this does no further checking that the variable is
                # correct; it assumes the query has been validated and the
                # variable usage here is of the correct type.
                coerced_values[name] = variable_values[variable_name]
            else:
                value_node = argument_node.value
                coerced_value = value_from_ast(value_node, arg_type, variable_values)
                if coerced_value is INVALID:
                    # `values_of_correct_type` validation should catch this
                    # before execution; this is a runtime safety check to
                    # ensure execution does not continue with an invalid
                    # argument value.
                    raise GraphQLError(
                        f"Argument '{name}'"
                        f" has invalid value {print_ast(value_node)}.",
                        argument_node.value,
                    )
                coerced_values[name] = coerced_value
    return coerced_values
def get_directive_values(
    directive_def: GraphQLDirective,
    node: NodeWithDirective,
    variable_values: Dict[str, Any] = None,
) -> Optional[Dict[str, Any]]:
    """Get coerced argument values for a directive on the given node.

    Prepares a dict of argument values given a directive definition and an
    AST node which may contain directives.  Optionally also accepts a dict
    of variable values.

    Returns None if the directive does not exist on the node.
    """
    target_name = directive_def.name
    for directive in node.directives or ():
        if directive.name.value == target_name:
            return get_argument_values(directive_def, directive, variable_values)
    return None
def build_ast_schema(
    document_ast: DocumentNode,
    assume_valid: bool = False,
    assume_valid_sdl: bool = False,
) -> GraphQLSchema:
    """Build a GraphQL Schema from a given AST.

    This takes the ast of a schema document produced by the parse function in
    src/language/parser.py.

    If no schema definition is provided, then it will look for types named Query
    and Mutation.

    Given that AST it constructs a GraphQLSchema. The resulting schema has no
    resolve methods, so execution will use default resolvers.

    When building a schema from a GraphQL service's introspection result, it
    might be safe to assume the schema is valid. Set ``assume_valid`` to True to
    assume the produced schema is valid. Set ``assume_valid_sdl`` to True to
    assume it is already a valid SDL document.

    :raises TypeError: if ``document_ast`` is not a DocumentNode, or if a type
        referenced during construction is not defined in the document.
    """
    if not isinstance(document_ast, DocumentNode):
        raise TypeError("Must provide a Document AST.")
    if not (assume_valid or assume_valid_sdl):
        # Imported lazily to avoid a circular import at module load time.
        from ..validation.validate import assert_valid_sdl

        assert_valid_sdl(document_ast)
    schema_def: Optional[SchemaDefinitionNode] = None
    type_defs: List[TypeDefinitionNode] = []
    directive_defs: List[DirectiveDefinitionNode] = []
    for def_ in document_ast.definitions:
        if isinstance(def_, SchemaDefinitionNode):
            schema_def = def_
        elif isinstance(def_, TypeDefinitionNode):
            type_defs.append(def_)
        elif isinstance(def_, DirectiveDefinitionNode):
            directive_defs.append(def_)

    def resolve_type(type_name: str) -> GraphQLNamedType:
        # Look up a named type in the map built below (closure over type_map).
        type_ = type_map.get(type_name)
        # Bug fix: this previously tested the builtin ``type`` (always truthy),
        # so unknown type names silently returned None instead of raising.
        if not type_:
            raise TypeError(f"Type '{type_name}' not found in document.")
        return type_

    ast_builder = ASTDefinitionBuilder(
        assume_valid=assume_valid, resolve_type=resolve_type
    )
    type_map = {node.name.value: ast_builder.build_type(node) for node in type_defs}
    if schema_def:
        operation_types = get_operation_types(schema_def)
    else:
        # No explicit schema definition: fall back to the conventional names.
        operation_types = {
            OperationType.QUERY: "Query",
            OperationType.MUTATION: "Mutation",
            OperationType.SUBSCRIPTION: "Subscription",
        }
    directives = [
        ast_builder.build_directive(directive_def) for directive_def in directive_defs
    ]
    # If specified directives were not explicitly declared, add them.
    if not any(directive.name == "skip" for directive in directives):
        directives.append(GraphQLSkipDirective)
    if not any(directive.name == "include" for directive in directives):
        directives.append(GraphQLIncludeDirective)
    if not any(directive.name == "deprecated" for directive in directives):
        directives.append(GraphQLDeprecatedDirective)
    query_type = operation_types.get(OperationType.QUERY)
    mutation_type = operation_types.get(OperationType.MUTATION)
    subscription_type = operation_types.get(OperationType.SUBSCRIPTION)
    return GraphQLSchema(
        # Note: While this could make early assertions to get the correctly
        # typed values below, that would throw immediately while type system
        # validation with `validate_schema()` will produce more actionable results.
        query=cast(GraphQLObjectType, type_map.get(query_type)) if query_type else None,
        mutation=cast(GraphQLObjectType, type_map.get(mutation_type))
        if mutation_type
        else None,
        subscription=cast(GraphQLObjectType, type_map.get(subscription_type))
        if subscription_type
        else None,
        types=list(type_map.values()),
        directives=directives,
        ast_node=schema_def,
        assume_valid=assume_valid,
    )
def get_deprecation_reason(
    node: Union[EnumValueDefinitionNode, FieldDefinitionNode]
) -> Optional[str]:
    """Given a field or enum value node, get deprecation reason as string."""
    # Imported lazily to avoid a circular import at module load time.
    from ..execution import get_directive_values

    directive_args = get_directive_values(GraphQLDeprecatedDirective, node)
    if not directive_args:
        # The @deprecated directive is not present on this node.
        return None
    return directive_args["reason"]
def build_schema(
    source: Union[str, Source],
    assume_valid=False,
    assume_valid_sdl=False,
    no_location=False,
    experimental_fragment_variables=False,
) -> GraphQLSchema:
    """Build a GraphQLSchema directly from a source document."""
    # Parse the SDL source first, then hand the document to the AST builder.
    document = parse(
        source,
        no_location=no_location,
        experimental_fragment_variables=experimental_fragment_variables,
    )
    return build_ast_schema(
        document,
        assume_valid=assume_valid,
        assume_valid_sdl=assume_valid_sdl,
    )
def validate(
    schema: GraphQLSchema,
    document_ast: DocumentNode,
    rules: Sequence[RuleType] = None,
    type_info: TypeInfo = None,
) -> List[GraphQLError]:
    """Implements the "Validation" section of the spec.

    Validation runs synchronously, returning a list of encountered errors, or an
    empty list if no errors were encountered and the document is valid.

    A list of specific validation rules may be provided. If not provided, the
    default list of rules defined by the GraphQL specification will be used.

    Each validation rule is a ValidationRule object which is a visitor object
    that holds a ValidationContext (see the language/visitor API). Visitor
    methods are expected to return GraphQLErrors, or lists of GraphQLErrors
    when invalid.

    Optionally a custom TypeInfo instance may be provided. If not provided, one
    will be created from the provided schema.
    """
    if not document_ast or not isinstance(document_ast, DocumentNode):
        raise TypeError("You must provide a document node.")
    # If the schema used for validation is invalid, throw an error.
    assert_valid_schema(schema)
    if type_info is None:
        type_info = TypeInfo(schema)
    elif not isinstance(type_info, TypeInfo):
        raise TypeError(f"Not a TypeInfo object: {inspect(type_info)}")
    if rules is None:
        rules = specified_rules
    elif not isinstance(rules, (list, tuple)):
        raise TypeError("Rules must be passed as a list/tuple.")
    validation_context = ValidationContext(schema, document_ast, type_info)
    # Instantiate one visitor per rule; a specialized parallel visitor runs all
    # of them during a single traversal while maintaining the skip/break API.
    rule_visitors = [rule(validation_context) for rule in rules]
    visit(document_ast, TypeInfoVisitor(type_info, ParallelVisitor(rule_visitors)))
    return validation_context.errors
def validate_sdl(
    document_ast: DocumentNode,
    schema_to_extend: GraphQLSchema = None,
    rules: Sequence[RuleType] = None,
) -> List[GraphQLError]:
    """Validate an SDL document."""
    context = SDLValidationContext(document_ast, schema_to_extend)
    # Fall back to the specified SDL rule set when no custom rules are given.
    active_rules = specified_sdl_rules if rules is None else rules
    visit(document_ast, ParallelVisitor([rule(context) for rule in active_rules]))
    return context.errors
def assert_valid_sdl(document_ast: DocumentNode) -> None:
    """Assert document is valid SDL.

    Utility function which asserts a SDL document is valid by throwing an error
    if it is invalid.
    """
    messages = [error.message for error in validate_sdl(document_ast)]
    if messages:
        raise TypeError("\n\n".join(messages))
def assert_valid_sdl_extension(
    document_ast: DocumentNode, schema: GraphQLSchema
) -> None:
    """Assert document is a valid SDL extension.

    Utility function which asserts a SDL document is valid by throwing an error
    if it is invalid.
    """
    messages = [error.message for error in validate_sdl(document_ast, schema)]
    if messages:
        raise TypeError("\n\n".join(messages))
def type_from_ast(schema, type_node):  # noqa: F811
    """Get the GraphQL type definition from an AST node.

    Given a Schema and an AST node describing a type, return a GraphQLType
    definition which applies to that type. For example, if provided the parsed
    AST node for `[User]`, a GraphQLList instance will be returned, containing
    the type called "User" found in the schema. If a type called "User" is not
    found in the schema, then None will be returned.
    """
    if isinstance(type_node, NamedTypeNode):
        return schema.get_type(type_node.name.value)
    if isinstance(type_node, (ListTypeNode, NonNullTypeNode)):
        # Resolve the wrapped type first; propagate None when it is unknown.
        inner = type_from_ast(schema, type_node.type)
        if not inner:
            return None
        wrapper = (
            GraphQLList if isinstance(type_node, ListTypeNode) else GraphQLNonNull
        )
        return wrapper(inner)
    # Not reachable. All possible type nodes have been considered.
    raise TypeError(  # pragma: no cover
        f"Unexpected type node: '{inspect(type_node)}'."
    )
def get_suggested_type_names(
    schema: GraphQLSchema, type_: GraphQLOutputType, field_name: str
) -> List[str]:
    """Get a list of suggested type names.

    Go through all of the implementations of type, as well as the interfaces
    that they implement. If any of those types include the provided field,
    suggest them, sorted by how often the type is referenced, starting with
    Interfaces.
    """
    if not is_abstract_type(type_):
        # Must be an Object type, which does not have possible types to offer.
        return []
    type_ = cast(GraphQLAbstractType, type_)
    object_suggestions: List[str] = []
    interface_counts: Dict[str, int] = defaultdict(int)
    for possible_type in schema.get_possible_types(type_):
        if field_name not in possible_type.fields:
            continue
        # This object type defines this field.
        object_suggestions.append(possible_type.name)
        for interface in possible_type.interfaces:
            if field_name in interface.fields:
                # This interface type defines this field.
                interface_counts[interface.name] += 1
    # Suggest interface types based on how common they are.
    interface_suggestions = sorted(
        interface_counts, key=lambda name: -interface_counts[name]
    )
    # Suggest both interface and object types.
    return interface_suggestions + object_suggestions
def get_suggested_field_names(type_: GraphQLOutputType, field_name: str) -> List[str]:
    """Get a list of suggested field names.

    For the field name provided, determine if there are any similar field names
    that may be the result of a typo.
    """
    if not (is_object_type(type_) or is_interface_type(type_)):
        # Must be a Union type, which does not define fields.
        return []
    return suggestion_list(field_name, list(type_.fields))  # type: ignore
def parse(
    source: SourceType, no_location=False, experimental_fragment_variables=False
) -> DocumentNode:
    """Given a GraphQL source, parse it into a Document.

    Throws GraphQLError if a syntax error is encountered.

    By default, the parser creates AST nodes that know the location in the
    source that they correspond to. The `no_location` option disables that
    behavior for performance or testing.

    Experimental features:

    If `experimental_fragment_variables` is set to True, the parser will
    understand and parse variable definitions contained in a fragment
    definition. They'll be represented in the `variable_definitions` field of
    the `FragmentDefinitionNode`. The syntax is identical to normal,
    query-defined variables. For example::

        fragment A($var: Boolean = false) on T {
            ...
        }
    """
    if not isinstance(source, Source):
        if not isinstance(source, str):
            raise TypeError(f"Must provide Source. Received: {inspect(source)}")
        source = Source(source)
    lexer = Lexer(
        source,
        no_location=no_location,
        experimental_fragment_variables=experimental_fragment_variables,
    )
    return parse_document(lexer)
def parse_value(source: SourceType, **options: dict) -> ValueNode:
    """Parse the AST for a given string containing a GraphQL value.

    Throws GraphQLError if a syntax error is encountered.

    This is useful within tools that operate upon GraphQL Values directly and
    in isolation of complete GraphQL documents.

    Consider providing the results to the utility function: `value_from_ast()`.
    """
    lexer = Lexer(Source(source) if isinstance(source, str) else source, **options)
    # The whole input must consist of exactly one value literal.
    expect_token(lexer, TokenKind.SOF)
    parsed_value = parse_value_literal(lexer, False)
    expect_token(lexer, TokenKind.EOF)
    return parsed_value
def parse_type(source: SourceType, **options: dict) -> TypeNode:
    """Parse the AST for a given string containing a GraphQL Type.

    Throws GraphQLError if a syntax error is encountered.

    This is useful within tools that operate upon GraphQL Types directly and
    in isolation of complete GraphQL documents.

    Consider providing the results to the utility function: `type_from_ast()`.
    """
    lexer = Lexer(Source(source) if isinstance(source, str) else source, **options)
    # The whole input must consist of exactly one type reference.
    expect_token(lexer, TokenKind.SOF)
    parsed_type = parse_type_reference(lexer)
    expect_token(lexer, TokenKind.EOF)
    return parsed_type
def parse_name(lexer: Lexer) -> NameNode:
    """Convert a name lex token into a name parse node."""
    name_token = expect_token(lexer, TokenKind.NAME)
    return NameNode(value=name_token.value, loc=loc(lexer, name_token))
def parse_fragment(lexer: Lexer) -> Union[FragmentSpreadNode, InlineFragmentNode]:
    """Corresponds to both FragmentSpread and InlineFragment in the spec.

    FragmentSpread: ... FragmentName Directives?
    InlineFragment: ... TypeCondition? Directives? SelectionSet
    """
    start = lexer.token
    expect_token(lexer, TokenKind.SPREAD)
    has_type_condition = expect_optional_keyword(lexer, "on")
    if has_type_condition or not peek(lexer, TokenKind.NAME):
        # A type condition or a missing name means this is an inline fragment.
        return InlineFragmentNode(
            type_condition=parse_named_type(lexer) if has_type_condition else None,
            directives=parse_directives(lexer, False),
            selection_set=parse_selection_set(lexer),
            loc=loc(lexer, start),
        )
    # Otherwise a bare name after "..." is a fragment spread.
    return FragmentSpreadNode(
        name=parse_fragment_name(lexer),
        directives=parse_directives(lexer, False),
        loc=loc(lexer, start),
    )
def loc(lexer: Lexer, start_token: Token) -> Optional[Location]:
    """Return a location object.

    Used to identify the place in the source that created a given parsed object.
    """
    if lexer.no_location:
        # Location tracking disabled (performance or testing).
        return None
    end_token = lexer.last_token
    return Location(
        start_token.start, end_token.end, start_token, end_token, lexer.source
    )
def expect_token(lexer: Lexer, kind: TokenKind) -> Token:
    """Expect the next token to be of the given kind.

    If the next token is of the given kind, return that token after advancing
    the lexer. Otherwise, do not change the parser state and throw an error.
    """
    token = lexer.token
    if token.kind != kind:
        raise GraphQLSyntaxError(
            lexer.source,
            token.start,
            f"Expected {kind.value}, found {token.kind.value}",
        )
    lexer.advance()
    return token
def expect_optional_token(lexer: Lexer, kind: TokenKind) -> Optional[Token]:
    """Expect the next token optionally to be of the given kind.

    If the next token is of the given kind, return that token after advancing
    the lexer. Otherwise, do not change the parser state and return None.
    """
    token = lexer.token
    if token.kind != kind:
        return None
    lexer.advance()
    return token
def expect_keyword(lexer: Lexer, value: str) -> Token:
    """Expect the next token to be a given keyword.

    If the next token is a given keyword, return that token after advancing the
    lexer. Otherwise, do not change the parser state and throw an error.
    """
    token = lexer.token
    # Keywords are NAME tokens whose value matches exactly.
    if token.kind != TokenKind.NAME or token.value != value:
        raise GraphQLSyntaxError(
            lexer.source, token.start, f"Expected {value!r}, found {token.desc}"
        )
    lexer.advance()
    return token
def expect_optional_keyword(lexer: Lexer, value: str) -> Optional[Token]:
    """Expect the next token optionally to be a given keyword.

    If the next token is a given keyword, return that token after advancing the
    lexer. Otherwise, do not change the parser state and return None.
    """
    token = lexer.token
    is_keyword = token.kind == TokenKind.NAME and token.value == value
    if not is_keyword:
        return None
    lexer.advance()
    return token
def unexpected(lexer: Lexer, at_token: Token = None) -> GraphQLError:
    """Create an error when an unexpected lexed token is encountered."""
    token = at_token or lexer.token
    message = f"Unexpected {token.desc}"
    return GraphQLSyntaxError(lexer.source, token.start, message)
def many_nodes(
    lexer: Lexer,
    open_kind: TokenKind,
    parse_fn: Callable[[Lexer], Node],
    close_kind: TokenKind,
) -> List[Node]:
    """Fetch matching nodes, at least one.

    Returns a non-empty list of parse nodes, determined by the `parse_fn`.
    This list begins with a lex token of `open_kind` and ends with a lex token
    of `close_kind`. Advances the parser to the next lex token after the
    closing token.
    """
    expect_token(lexer, open_kind)
    # At least one node is required before the closing token.
    nodes: List[Node] = [parse_fn(lexer)]
    while expect_optional_token(lexer, close_kind) is None:
        nodes.append(parse_fn(lexer))
    return nodes
def coercion_error(
    message: str,
    blame_node: Node = None,
    path: Path = None,
    sub_message: str = None,
    original_error: Exception = None,
) -> GraphQLError:
    """Return a GraphQLError instance"""
    full_message = message
    if path:
        # Append a human-readable description of where in the value it failed.
        full_message += f" at {print_path(path)}"
    if sub_message:
        full_message += f"; {sub_message}"
    else:
        full_message += "."
    # noinspection PyArgumentEqualDefault
    return GraphQLError(full_message, blame_node, None, None, None, original_error)
def print_path(path: Path) -> str:
    """Build string describing the path into the value where error was found"""
    segments: List[str] = []
    step: Optional[Path] = path
    # Walk from the deepest path element back to the root, collecting segments.
    while step:
        key = step.key
        segments.append(f".{key}" if isinstance(key, str) else f"[{key}]")
        step = step.prev
    if not segments:
        return ""
    return "value" + "".join(reversed(segments))
def parse_int_literal(ast, _variables=None):
    """Parse an integer value node in the AST."""
    if not isinstance(ast, IntValueNode):
        return INVALID
    num = int(ast.value)
    # Reject values outside the 32-bit signed integer range.
    return num if MIN_INT <= num <= MAX_INT else INVALID
def parse_float_literal(ast, _variables=None):
    """Parse a float value node in the AST."""
    # Int literals are also accepted and widened to float.
    if not isinstance(ast, (FloatValueNode, IntValueNode)):
        return INVALID
    return float(ast.value)
def parse_id_literal(ast, _variables=None):
    """Parse an ID value node in the AST."""
    # IDs may be written as either strings or integers; the raw value is kept.
    if not isinstance(ast, (StringValueNode, IntValueNode)):
        return INVALID
    return ast.value
def find_breaking_changes(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
    """Find breaking changes.

    Given two schemas, returns a list containing descriptions of all the types
    of breaking changes covered by the other functions down below.
    """
    changes: List[BreakingChange] = []
    changes += find_removed_types(old_schema, new_schema)
    changes += find_types_that_changed_kind(old_schema, new_schema)
    changes += find_fields_that_changed_type_on_object_or_interface_types(
        old_schema, new_schema
    )
    changes += find_fields_that_changed_type_on_input_object_types(
        old_schema, new_schema
    ).breaking_changes
    changes += find_types_removed_from_unions(old_schema, new_schema)
    changes += find_values_removed_from_enums(old_schema, new_schema)
    changes += find_arg_changes(old_schema, new_schema).breaking_changes
    changes += find_interfaces_removed_from_object_types(old_schema, new_schema)
    changes += find_removed_directives(old_schema, new_schema)
    changes += find_removed_directive_args(old_schema, new_schema)
    changes += find_added_non_null_directive_args(old_schema, new_schema)
    changes += find_removed_directive_locations(old_schema, new_schema)
    return changes
def find_dangerous_changes(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[DangerousChange]:
    """Find dangerous changes.

    Given two schemas, returns a list containing descriptions of all the types
    of potentially dangerous changes covered by the other functions down below.
    """
    changes: List[DangerousChange] = []
    changes += find_arg_changes(old_schema, new_schema).dangerous_changes
    changes += find_values_added_to_enums(old_schema, new_schema)
    changes += find_interfaces_added_to_object_types(old_schema, new_schema)
    changes += find_types_added_to_unions(old_schema, new_schema)
    changes += find_fields_that_changed_type_on_input_object_types(
        old_schema, new_schema
    ).dangerous_changes
    return changes
def find_removed_types(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
    """Find removed types.

    Given two schemas, returns a list containing descriptions of any breaking
    changes in the newSchema related to removing an entire type.
    """
    new_type_map = new_schema.type_map
    # Any type name present in the old schema but missing from the new one
    # constitutes a breaking removal.
    return [
        BreakingChange(BreakingChangeType.TYPE_REMOVED, f"{type_name} was removed.")
        for type_name in old_schema.type_map
        if type_name not in new_type_map
    ]
def find_types_that_changed_kind(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
    """Find types that changed kind

    Given two schemas, returns a list containing descriptions of any breaking
    changes in the newSchema related to changing the type of a type.
    """
    new_type_map = new_schema.type_map
    changes: List[BreakingChange] = []
    for type_name, old_type in old_schema.type_map.items():
        if type_name not in new_type_map:
            # Removed types are reported by find_removed_types instead.
            continue
        new_type = new_type_map[type_name]
        if old_type.__class__ is new_type.__class__:
            continue
        changes.append(
            BreakingChange(
                BreakingChangeType.TYPE_CHANGED_KIND,
                f"{type_name} changed from {type_kind_name(old_type)}"
                f" to {type_kind_name(new_type)}.",
            )
        )
    return changes
def find_arg_changes(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> BreakingAndDangerousChanges:
    """Find argument changes.

    Given two schemas, returns a list containing descriptions of any breaking or
    dangerous changes in the new_schema related to arguments (such as removal or
    change of type of an argument, or a change in an argument's default value).
    """
    old_type_map = old_schema.type_map
    new_type_map = new_schema.type_map
    breaking_changes: List[BreakingChange] = []
    dangerous_changes: List[DangerousChange] = []
    for type_name, old_type in old_type_map.items():
        new_type = new_type_map.get(type_name)
        # Only compare arguments when both versions of the type exist and are
        # the same kind of composite type (object or interface).
        if (
            not (is_object_type(old_type) or is_interface_type(old_type))
            or not (is_object_type(new_type) or is_interface_type(new_type))
            or new_type.__class__ is not old_type.__class__
        ):
            continue
        old_type = cast(Union[GraphQLObjectType, GraphQLInterfaceType], old_type)
        new_type = cast(Union[GraphQLObjectType, GraphQLInterfaceType], new_type)
        old_type_fields = old_type.fields
        new_type_fields = new_type.fields
        for field_name in old_type_fields:
            # Fields that disappeared entirely are reported elsewhere.
            if field_name not in new_type_fields:
                continue
            old_args = old_type_fields[field_name].args
            new_args = new_type_fields[field_name].args
            for arg_name, old_arg in old_args.items():
                new_arg = new_args.get(arg_name)
                if not new_arg:
                    # Arg not present
                    breaking_changes.append(
                        BreakingChange(
                            BreakingChangeType.ARG_REMOVED,
                            f"{old_type.name}.{field_name} arg"
                            f" {arg_name} was removed",
                        )
                    )
                    continue
                is_safe = is_change_safe_for_input_object_field_or_field_arg(
                    old_arg.type, new_arg.type
                )
                if not is_safe:
                    breaking_changes.append(
                        BreakingChange(
                            BreakingChangeType.ARG_CHANGED_KIND,
                            f"{old_type.name}.{field_name} arg"
                            f" {arg_name} has changed type from"
                            f" {old_arg.type} to {new_arg.type}",
                        )
                    )
                elif (
                    old_arg.default_value is not INVALID
                    and old_arg.default_value != new_arg.default_value
                ):
                    # Same type but a different default: dangerous, not breaking.
                    dangerous_changes.append(
                        DangerousChange(
                            DangerousChangeType.ARG_DEFAULT_VALUE_CHANGE,
                            f"{old_type.name}.{field_name} arg"
                            f" {arg_name} has changed defaultValue",
                        )
                    )
            # Check if arg was added to the field
            for arg_name in new_args:
                if arg_name not in old_args:
                    new_arg_def = new_args[arg_name]
                    if is_required_argument(new_arg_def):
                        breaking_changes.append(
                            BreakingChange(
                                BreakingChangeType.REQUIRED_ARG_ADDED,
                                f"A required arg {arg_name} on"
                                f" {type_name}.{field_name} was added",
                            )
                        )
                    else:
                        dangerous_changes.append(
                            DangerousChange(
                                DangerousChangeType.OPTIONAL_ARG_ADDED,
                                f"An optional arg {arg_name} on"
                                f" {type_name}.{field_name} was added",
                            )
                        )
    return BreakingAndDangerousChanges(breaking_changes, dangerous_changes)
"resource": ""
} |
def find_types_removed_from_unions(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
    """Find types removed from unions.

    Given two schemas, returns a list containing descriptions of any breaking
    changes in the new_schema related to removing types from a union type.
    """
    new_type_map = new_schema.type_map
    removed: List[BreakingChange] = []
    for old_type_name, old_type in old_schema.type_map.items():
        new_type = new_type_map.get(old_type_name)
        if not (is_union_type(old_type) and is_union_type(new_type)):
            continue
        old_union = cast(GraphQLUnionType, old_type)
        new_union = cast(GraphQLUnionType, new_type)
        # Compare membership by name, not by object identity.
        new_member_names = {member.name for member in new_union.types}
        for member in old_union.types:
            if member.name not in new_member_names:
                removed.append(
                    BreakingChange(
                        BreakingChangeType.TYPE_REMOVED_FROM_UNION,
                        f"{member.name} was removed"
                        f" from union type {old_type_name}.",
                    )
                )
    return removed
def find_types_added_to_unions(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[DangerousChange]:
    """Find types added to union.

    Given two schemas, returns a list containing descriptions of any dangerous
    changes in the new_schema related to adding types to a union type.
    """
    old_type_map = old_schema.type_map
    added: List[DangerousChange] = []
    for new_type_name, new_type in new_schema.type_map.items():
        old_type = old_type_map.get(new_type_name)
        if not (is_union_type(old_type) and is_union_type(new_type)):
            continue
        old_union = cast(GraphQLUnionType, old_type)
        new_union = cast(GraphQLUnionType, new_type)
        # Compare membership by name, not by object identity.
        old_member_names = {member.name for member in old_union.types}
        for member in new_union.types:
            if member.name not in old_member_names:
                added.append(
                    DangerousChange(
                        DangerousChangeType.TYPE_ADDED_TO_UNION,
                        f"{member.name} was added"
                        f" to union type {new_type_name}.",
                    )
                )
    return added
def find_values_removed_from_enums(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[BreakingChange]:
    """Find values removed from enums.

    Given two schemas, returns a list containing descriptions of any breaking
    changes in the new_schema related to removing values from an enum type.
    """
    new_type_map = new_schema.type_map
    removed: List[BreakingChange] = []
    for type_name, old_type in old_schema.type_map.items():
        new_type = new_type_map.get(type_name)
        if not (is_enum_type(old_type) and is_enum_type(new_type)):
            continue
        new_values = cast(GraphQLEnumType, new_type).values
        for value_name in cast(GraphQLEnumType, old_type).values:
            if value_name not in new_values:
                removed.append(
                    BreakingChange(
                        BreakingChangeType.VALUE_REMOVED_FROM_ENUM,
                        f"{value_name} was removed from enum type {type_name}.",
                    )
                )
    return removed
def find_values_added_to_enums(
    old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[DangerousChange]:
    """Find values added to enums.

    Given two schemas, returns a list containing descriptions of any dangerous
    changes in the new_schema related to adding values to an enum type.
    """
    new_type_map = new_schema.type_map
    added: List[DangerousChange] = []
    for type_name, old_type in old_schema.type_map.items():
        new_type = new_type_map.get(type_name)
        if not (is_enum_type(old_type) and is_enum_type(new_type)):
            continue
        old_values = cast(GraphQLEnumType, old_type).values
        for value_name in cast(GraphQLEnumType, new_type).values:
            if value_name not in old_values:
                added.append(
                    DangerousChange(
                        DangerousChangeType.VALUE_ADDED_TO_ENUM,
                        f"{value_name} was added to enum type {type_name}.",
                    )
                )
    return added
def is_finite(value: Any) -> bool:
    """Return true if a value is a finite number."""
    if isinstance(value, int):
        # All Python ints (including bool) are finite by construction.
        return True
    return isinstance(value, float) and isfinite(value)
def find_conflicts_within_selection_set(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    parent_type: Optional[GraphQLNamedType],
    selection_set: SelectionSetNode,
) -> List[Conflict]:
    """Find conflicts within selection set.

    Find all conflicts found "within" a selection set, including those found
    via spreading in fragments.

    Called when visiting each SelectionSet in the GraphQL Document.
    """
    conflicts: List[Conflict] = []
    field_map, fragment_names = get_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, parent_type, selection_set
    )
    # (A) Find all conflicts "within" the fields of this selection set.
    # Note: this is the *only place* `collect_conflicts_within` is called.
    collect_conflicts_within(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        field_map,
    )
    if fragment_names:
        compared_fragments: Set[str] = set()
        # (B) Then collect conflicts between these fields and those represented by each
        # spread fragment name found.
        for i, fragment_name in enumerate(fragment_names):
            # False: fields inside one selection set are never mutually exclusive.
            collect_conflicts_between_fields_and_fragment(
                context,
                conflicts,
                cached_fields_and_fragment_names,
                compared_fragments,
                compared_fragment_pairs,
                False,
                field_map,
                fragment_name,
            )
            # (C) Then compare this fragment with all other fragments found in this
            # selection set to collect conflicts within fragments spread together.
            # This compares each item in the list of fragment names to every other
            # item in that same list (except for itself).
            for other_fragment_name in fragment_names[i + 1 :]:
                collect_conflicts_between_fragments(
                    context,
                    conflicts,
                    cached_fields_and_fragment_names,
                    compared_fragment_pairs,
                    False,
                    fragment_name,
                    other_fragment_name,
                )
    return conflicts
"resource": ""
} |
def collect_conflicts_between_fields_and_fragment(
    context: ValidationContext,
    conflicts: List[Conflict],
    cached_fields_and_fragment_names: Dict,
    compared_fragments: Set[str],
    compared_fragment_pairs: "PairSet",
    are_mutually_exclusive: bool,
    field_map: NodeAndDefCollection,
    fragment_name: str,
) -> None:
    """Collect conflicts between fields and fragment.

    Collect all conflicts found between a set of fields and a fragment
    reference including via spreading in any nested fragments. Conflicts are
    appended to the ``conflicts`` list in place.
    """
    # Memoize so a fragment is not compared for conflicts more than once.
    if fragment_name in compared_fragments:
        return
    compared_fragments.add(fragment_name)
    fragment = context.get_fragment(fragment_name)
    if not fragment:
        # Unknown fragment name; reported by a different validation rule.
        return None
    field_map2, fragment_names2 = get_referenced_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, fragment
    )
    # Do not compare a fragment's fieldMap to itself.
    if field_map is field_map2:
        return
    # (D) First collect any conflicts between the provided collection of fields and the
    # collection of fields represented by the given fragment.
    collect_conflicts_between(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        are_mutually_exclusive,
        field_map,
        field_map2,
    )
    # (E) Then collect any conflicts between the provided collection of fields and any
    # fragment names found in the given fragment (recursing into nested spreads).
    for fragment_name2 in fragment_names2:
        collect_conflicts_between_fields_and_fragment(
            context,
            conflicts,
            cached_fields_and_fragment_names,
            compared_fragments,
            compared_fragment_pairs,
            are_mutually_exclusive,
            field_map,
            fragment_name2,
        )
"resource": ""
} |
def collect_conflicts_between_fragments(
    context: ValidationContext,
    conflicts: List[Conflict],
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    are_mutually_exclusive: bool,
    fragment_name1: str,
    fragment_name2: str,
) -> None:
    """Collect conflicts between fragments.

    Collect all conflicts found between two fragments, including via spreading
    in any nested fragments. Conflicts are appended to ``conflicts`` in place.
    """
    # No need to compare a fragment to itself.
    if fragment_name1 == fragment_name2:
        return
    # Memoize so two fragments are not compared for conflicts more than once.
    if compared_fragment_pairs.has(
        fragment_name1, fragment_name2, are_mutually_exclusive
    ):
        return
    compared_fragment_pairs.add(fragment_name1, fragment_name2, are_mutually_exclusive)
    fragment1 = context.get_fragment(fragment_name1)
    fragment2 = context.get_fragment(fragment_name2)
    if not fragment1 or not fragment2:
        # Unknown fragment names; reported by a different validation rule.
        return None
    field_map1, fragment_names1 = get_referenced_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, fragment1
    )
    field_map2, fragment_names2 = get_referenced_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, fragment2
    )
    # (F) First, collect all conflicts between these two collections of fields
    # (not including any nested fragments)
    collect_conflicts_between(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        are_mutually_exclusive,
        field_map1,
        field_map2,
    )
    # (G) Then collect conflicts between the first fragment and any nested fragments
    # spread in the second fragment.
    for nested_fragment_name2 in fragment_names2:
        collect_conflicts_between_fragments(
            context,
            conflicts,
            cached_fields_and_fragment_names,
            compared_fragment_pairs,
            are_mutually_exclusive,
            fragment_name1,
            nested_fragment_name2,
        )
    # (G) Then collect conflicts between the second fragment and any nested fragments
    # spread in the first fragment.
    for nested_fragment_name1 in fragment_names1:
        collect_conflicts_between_fragments(
            context,
            conflicts,
            cached_fields_and_fragment_names,
            compared_fragment_pairs,
            are_mutually_exclusive,
            nested_fragment_name1,
            fragment_name2,
        )
"resource": ""
} |
q29968 | find_conflicts_between_sub_selection_sets | train | def find_conflicts_between_sub_selection_sets(
context: ValidationContext,
cached_fields_and_fragment_names: Dict,
compared_fragment_pairs: "PairSet",
are_mutually_exclusive: bool,
parent_type1: Optional[GraphQLNamedType],
selection_set1: SelectionSetNode,
parent_type2: Optional[GraphQLNamedType],
selection_set2: SelectionSetNode,
) -> List[Conflict]:
"""Find conflicts between sub selection sets.
Find all conflicts found between two selection sets, including those found via
spreading in fragments. Called when determining if conflicts exist between the
sub-fields of two overlapping fields.
"""
conflicts: List[Conflict] = []
field_map1, fragment_names1 = get_fields_and_fragment_names(
context, cached_fields_and_fragment_names, parent_type1, selection_set1
)
field_map2, fragment_names2 = get_fields_and_fragment_names(
context, cached_fields_and_fragment_names, parent_type2, selection_set2
)
# (H) First, collect all conflicts between these two collections of field.
collect_conflicts_between(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragment_pairs,
are_mutually_exclusive,
field_map1,
field_map2,
)
# (I) Then collect conflicts between the first collection of fields and those
# referenced by each fragment name associated with the second.
if fragment_names2:
compared_fragments: Set[str] = set()
for fragment_name2 in fragment_names2:
collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
compared_fragment_pairs,
are_mutually_exclusive,
field_map1,
fragment_name2,
)
# (I) Then collect conflicts between the second collection of fields and those
# referenced by each fragment name associated with the first.
if fragment_names1:
compared_fragments = set()
for fragment_name1 in fragment_names1:
collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
compared_fragment_pairs,
are_mutually_exclusive,
field_map2,
fragment_name1,
)
# (J) Also collect conflicts between any fragment names by the first and fragment
# names by the second. This compares each item in the first set of names to each
# item in the second set of names.
for fragment_name1 in fragment_names1:
for fragment_name2 in fragment_names2:
collect_conflicts_between_fragments(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragment_pairs,
are_mutually_exclusive,
fragment_name1,
fragment_name2,
)
return conflicts | python | {
"resource": ""
} |
q29969 | find_conflict | train | def find_conflict(
context: ValidationContext,
cached_fields_and_fragment_names: Dict,
compared_fragment_pairs: "PairSet",
parent_fields_are_mutually_exclusive: bool,
response_name: str,
field1: NodeAndDef,
field2: NodeAndDef,
) -> Optional[Conflict]:
"""Find conflict.
Determines if there is a conflict between two particular fields, including comparing
their sub-fields.
"""
parent_type1, node1, def1 = field1
parent_type2, node2, def2 = field2
# If it is known that two fields could not possibly apply at the same time, due to
# the parent types, then it is safe to permit them to diverge in aliased field or
# arguments used as they will not present any ambiguity by differing. It is known
# that two parent types could never overlap if they are different Object types.
# Interface or Union types might overlap - if not in the current state of the
# schema, then perhaps in some future version, thus may not safely diverge.
are_mutually_exclusive = parent_fields_are_mutually_exclusive or (
parent_type1 != parent_type2
and is_object_type(parent_type1)
and is_object_type(parent_type2)
)
# The return type for each field.
type1 = cast(Optional[GraphQLOutputType], def1 and def1.type)
type2 = cast(Optional[GraphQLOutputType], def2 and def2.type)
if not are_mutually_exclusive:
# Two aliases must refer to the same field.
name1 = node1.name.value
name2 = node2.name.value
if name1 != name2:
return (
(response_name, f"{name1} and {name2} are different fields"),
[node1],
[node2],
)
# Two field calls must have the same arguments.
if not same_arguments(node1.arguments or [], node2.arguments or []):
return (response_name, "they have differing arguments"), [node1], [node2]
if type1 and type2 and do_types_conflict(type1, type2):
return (
(response_name, f"they return conflicting types {type1} and {type2}"),
[node1],
[node2],
)
# Collect and compare sub-fields. Use the same "visited fragment names" list for
# both collections so fields in a fragment reference are never compared to
# themselves.
selection_set1 = node1.selection_set
selection_set2 = node2.selection_set
if selection_set1 and selection_set2:
conflicts = find_conflicts_between_sub_selection_sets(
context,
cached_fields_and_fragment_names,
compared_fragment_pairs,
are_mutually_exclusive,
get_named_type(type1),
selection_set1,
get_named_type(type2),
selection_set2,
)
return subfield_conflicts(conflicts, response_name, node1, node2)
return None | python | {
"resource": ""
} |
q29970 | do_types_conflict | train | def do_types_conflict(type1: GraphQLOutputType, type2: GraphQLOutputType) -> bool:
"""Check whether two types conflict
Two types conflict if both types could not apply to a value simultaneously.
Composite types are ignored as their individual field types will be compared later
recursively. However List and Non-Null types must match.
"""
if is_list_type(type1):
return (
do_types_conflict(
cast(GraphQLList, type1).of_type, cast(GraphQLList, type2).of_type
)
if is_list_type(type2)
else True
)
if is_list_type(type2):
return True
if is_non_null_type(type1):
return (
do_types_conflict(
cast(GraphQLNonNull, type1).of_type, cast(GraphQLNonNull, type2).of_type
)
if is_non_null_type(type2)
else True
)
if is_non_null_type(type2):
return True
if is_leaf_type(type1) or is_leaf_type(type2):
return type1 is not type2
return False | python | {
"resource": ""
} |
q29971 | get_fields_and_fragment_names | train | def get_fields_and_fragment_names(
context: ValidationContext,
cached_fields_and_fragment_names: Dict,
parent_type: Optional[GraphQLNamedType],
selection_set: SelectionSetNode,
) -> Tuple[NodeAndDefCollection, List[str]]:
"""Get fields and referenced fragment names
Given a selection set, return the collection of fields (a mapping of response name
to field nodes and definitions) as well as a list of fragment names referenced via
fragment spreads.
"""
cached = cached_fields_and_fragment_names.get(selection_set)
if not cached:
node_and_defs: NodeAndDefCollection = {}
fragment_names: Dict[str, bool] = {}
collect_fields_and_fragment_names(
context, parent_type, selection_set, node_and_defs, fragment_names
)
cached = (node_and_defs, list(fragment_names))
cached_fields_and_fragment_names[selection_set] = cached
return cached | python | {
"resource": ""
} |
q29972 | get_referenced_fields_and_fragment_names | train | def get_referenced_fields_and_fragment_names(
context: ValidationContext,
cached_fields_and_fragment_names: Dict,
fragment: FragmentDefinitionNode,
) -> Tuple[NodeAndDefCollection, List[str]]:
"""Get referenced fields and nested fragment names
Given a reference to a fragment, return the represented collection of fields as well
as a list of nested fragment names referenced via fragment spreads.
"""
# Short-circuit building a type from the node if possible.
cached = cached_fields_and_fragment_names.get(fragment.selection_set)
if cached:
return cached
fragment_type = type_from_ast(context.schema, fragment.type_condition)
return get_fields_and_fragment_names(
context, cached_fields_and_fragment_names, fragment_type, fragment.selection_set
) | python | {
"resource": ""
} |
q29973 | subfield_conflicts | train | def subfield_conflicts(
conflicts: List[Conflict], response_name: str, node1: FieldNode, node2: FieldNode
) -> Optional[Conflict]:
"""Check whether there are conflicts between sub-fields.
Given a series of Conflicts which occurred between two sub-fields, generate a single
Conflict.
"""
if conflicts:
return (
(response_name, [conflict[0] for conflict in conflicts]),
list(chain([node1], *[conflict[1] for conflict in conflicts])),
list(chain([node2], *[conflict[2] for conflict in conflicts])),
)
return None | python | {
"resource": ""
} |
q29974 | EventEmitter.add_listener | train | def add_listener(self, event_name: str, listener: Callable):
"""Add a listener."""
self.listeners[event_name].append(listener)
return self | python | {
"resource": ""
} |
q29975 | EventEmitter.remove_listener | train | def remove_listener(self, event_name, listener):
"""Removes a listener."""
self.listeners[event_name].remove(listener)
return self | python | {
"resource": ""
} |
q29976 | introspection_from_schema | train | def introspection_from_schema(
schema: GraphQLSchema, descriptions: bool = True
) -> IntrospectionSchema:
"""Build an IntrospectionQuery from a GraphQLSchema
IntrospectionQuery is useful for utilities that care about type and field
relationships, but do not need to traverse through those relationships.
This is the inverse of build_client_schema. The primary use case is outside of the
server context, for instance when doing schema comparisons.
"""
query_ast = parse(get_introspection_query(descriptions))
from ..execution.execute import execute, ExecutionResult
result = execute(schema, query_ast)
if not isinstance(result, ExecutionResult):
raise RuntimeError("Introspection cannot be executed")
if result.errors or not result.data:
raise result.errors[0] if result.errors else GraphQLError(
"Introspection did not return a result"
)
return result.data | python | {
"resource": ""
} |
q29977 | find_deprecated_usages | train | def find_deprecated_usages(
schema: GraphQLSchema, ast: DocumentNode
) -> List[GraphQLError]:
"""Get a list of GraphQLError instances describing each deprecated use."""
type_info = TypeInfo(schema)
visitor = FindDeprecatedUsages(type_info)
visit(ast, TypeInfoVisitor(type_info, visitor))
return visitor.errors | python | {
"resource": ""
} |
q29978 | snake_to_camel | train | def snake_to_camel(s, upper=True):
"""Convert from snake_case to CamelCase
If upper is set, then convert to upper CamelCase, otherwise the first character
keeps its case.
"""
s = _re_snake_to_camel.sub(lambda m: m.group(2).upper(), s)
if upper:
s = s[:1].upper() + s[1:]
return s | python | {
"resource": ""
} |
q29979 | ValuesOfCorrectTypeRule.is_valid_scalar | train | def is_valid_scalar(self, node: ValueNode) -> None:
"""Check whether this is a valid scalar.
Any value literal may be a valid representation of a Scalar, depending on that
scalar type.
"""
# Report any error at the full type expected by the location.
location_type = self.context.get_input_type()
if not location_type:
return
type_ = get_named_type(location_type)
if not is_scalar_type(type_):
self.report_error(
GraphQLError(
bad_value_message(
location_type,
print_ast(node),
enum_type_suggestion(type_, node),
),
node,
)
)
return
# Scalars determine if a literal value is valid via `parse_literal()` which may
# throw or return an invalid value to indicate failure.
type_ = cast(GraphQLScalarType, type_)
try:
parse_result = type_.parse_literal(node)
if is_invalid(parse_result):
self.report_error(
GraphQLError(
bad_value_message(location_type, print_ast(node)), node
)
)
except Exception as error:
# Ensure a reference to the original error is maintained.
self.report_error(
GraphQLError(
bad_value_message(location_type, print_ast(node), str(error)),
node,
original_error=error,
)
) | python | {
"resource": ""
} |
q29980 | is_missing_variable | train | def is_missing_variable(
value_node: ValueNode, variables: Dict[str, Any] = None
) -> bool:
"""Check if `value_node` is a variable not defined in the `variables` dict."""
return isinstance(value_node, VariableNode) and (
not variables or is_invalid(variables.get(value_node.name.value, INVALID))
) | python | {
"resource": ""
} |
q29981 | is_integer | train | def is_integer(value: Any) -> bool:
"""Return true if a value is an integer number."""
return (isinstance(value, int) and not isinstance(value, bool)) or (
isinstance(value, float) and isfinite(value) and int(value) == value
) | python | {
"resource": ""
} |
q29982 | allowed_variable_usage | train | def allowed_variable_usage(
schema: GraphQLSchema,
var_type: GraphQLType,
var_default_value: Optional[ValueNode],
location_type: GraphQLType,
location_default_value: Any,
) -> bool:
"""Check for allowed variable usage.
Returns True if the variable is allowed in the location it was found, which includes
considering if default values exist for either the variable or the location at which
it is located.
"""
if is_non_null_type(location_type) and not is_non_null_type(var_type):
has_non_null_variable_default_value = var_default_value and not isinstance(
var_default_value, NullValueNode
)
has_location_default_value = location_default_value is not INVALID
if not has_non_null_variable_default_value and not has_location_default_value:
return False
location_type = cast(GraphQLNonNull, location_type)
nullable_location_type = location_type.of_type
return is_type_sub_type_of(schema, var_type, nullable_location_type)
return is_type_sub_type_of(schema, var_type, location_type) | python | {
"resource": ""
} |
q29983 | is_schema_of_common_names | train | def is_schema_of_common_names(schema: GraphQLSchema) -> bool:
"""Check whether this schema uses the common naming convention.
GraphQL schema define root types for each type of operation. These types are the
same as any other type and can be named in any manner, however there is a common
naming convention:
schema {
query: Query
mutation: Mutation
}
When using this naming convention, the schema description can be omitted.
"""
query_type = schema.query_type
if query_type and query_type.name != "Query":
return False
mutation_type = schema.mutation_type
if mutation_type and mutation_type.name != "Mutation":
return False
subscription_type = schema.subscription_type
if subscription_type and subscription_type.name != "Subscription":
return False
return True | python | {
"resource": ""
} |
q29984 | print_value | train | def print_value(value: Any, type_: GraphQLInputType) -> str:
"""Convenience function for printing a Python value"""
return print_ast(ast_from_value(value, type_)) | python | {
"resource": ""
} |
q29985 | add_description | train | def add_description(method):
"""Decorator adding the description to the output of a visitor method."""
@wraps(method)
def wrapped(self, node, *args):
return join([node.description, method(self, node, *args)], "\n")
return wrapped | python | {
"resource": ""
} |
q29986 | join | train | def join(strings: Optional[Sequence[str]], separator: str = "") -> str:
"""Join strings in a given sequence.
Return an empty string if it is None or empty, otherwise join all items together
separated by separator if provided.
"""
return separator.join(s for s in strings if s) if strings else "" | python | {
"resource": ""
} |
q29987 | wrap | train | def wrap(start: str, string: str, end: str = "") -> str:
"""Wrap string inside other strings at start and end.
If the string is not None or empty, then wrap with start and end, otherwise return
an empty string.
"""
return f"{start}{string}{end}" if string else "" | python | {
"resource": ""
} |
q29988 | has_multiline_items | train | def has_multiline_items(maybe_list: Optional[Sequence[str]]):
"""Check whether one of the items in the list has multiple lines."""
return maybe_list and any(is_multiline(item) for item in maybe_list) | python | {
"resource": ""
} |
q29989 | get_middleware_resolvers | train | def get_middleware_resolvers(middlewares: Tuple[Any, ...]) -> Iterator[Callable]:
"""Get a list of resolver functions from a list of classes or functions."""
for middleware in middlewares:
if isfunction(middleware):
yield middleware
else: # middleware provided as object with 'resolve' method
resolver_func = getattr(middleware, "resolve", None)
if resolver_func is not None:
yield resolver_func | python | {
"resource": ""
} |
q29990 | MiddlewareManager.get_field_resolver | train | def get_field_resolver(
self, field_resolver: GraphQLFieldResolver
) -> GraphQLFieldResolver:
"""Wrap the provided resolver with the middleware.
Returns a function that chains the middleware functions with the provided
resolver function.
"""
if self._middleware_resolvers is None:
return field_resolver
if field_resolver not in self._cached_resolvers:
self._cached_resolvers[field_resolver] = reduce(
lambda chained_fns, next_fn: partial(next_fn, chained_fns),
self._middleware_resolvers,
field_resolver,
)
return self._cached_resolvers[field_resolver] | python | {
"resource": ""
} |
q29991 | type_map_reducer | train | def type_map_reducer(map_: TypeMap, type_: GraphQLNamedType = None) -> TypeMap:
"""Reducer function for creating the type map from given types."""
if not type_:
return map_
if is_wrapping_type(type_):
return type_map_reducer(
map_, cast(GraphQLWrappingType[GraphQLNamedType], type_).of_type
)
name = type_.name
if name in map_:
if map_[name] is not type_:
raise TypeError(
"Schema must contain uniquely named types but contains multiple"
f" types named {name!r}."
)
return map_
map_[name] = type_
if is_union_type(type_):
type_ = cast(GraphQLUnionType, type_)
map_ = type_map_reduce(type_.types, map_)
if is_object_type(type_):
type_ = cast(GraphQLObjectType, type_)
map_ = type_map_reduce(type_.interfaces, map_)
if is_object_type(type_) or is_interface_type(type_):
for field in cast(GraphQLInterfaceType, type_).fields.values():
args = field.args
if args:
types = [arg.type for arg in args.values()]
map_ = type_map_reduce(types, map_)
map_ = type_map_reducer(map_, field.type)
if is_input_object_type(type_):
for field in cast(GraphQLInputObjectType, type_).fields.values():
map_ = type_map_reducer(map_, field.type)
return map_ | python | {
"resource": ""
} |
q29992 | type_map_directive_reducer | train | def type_map_directive_reducer(
map_: TypeMap, directive: GraphQLDirective = None
) -> TypeMap:
"""Reducer function for creating the type map from given directives."""
# Directives are not validated until validate_schema() is called.
if not is_directive(directive):
return map_
directive = cast(GraphQLDirective, directive)
return reduce(
lambda prev_map, arg: type_map_reducer(prev_map, arg.type), # type: ignore
directive.args.values(),
map_,
) | python | {
"resource": ""
} |
q29993 | GraphQLSchema.get_possible_types | train | def get_possible_types(
self, abstract_type: GraphQLAbstractType
) -> Sequence[GraphQLObjectType]:
"""Get list of all possible concrete types for given abstract type."""
if is_union_type(abstract_type):
abstract_type = cast(GraphQLUnionType, abstract_type)
return abstract_type.types
return self._implementations[abstract_type.name] | python | {
"resource": ""
} |
q29994 | GraphQLSchema.is_possible_type | train | def is_possible_type(
self, abstract_type: GraphQLAbstractType, possible_type: GraphQLObjectType
) -> bool:
"""Check whether a concrete type is possible for an abstract type."""
possible_type_map = self._possible_type_map
try:
possible_type_names = possible_type_map[abstract_type.name]
except KeyError:
possible_types = self.get_possible_types(abstract_type)
possible_type_names = {type_.name for type_ in possible_types}
possible_type_map[abstract_type.name] = possible_type_names
return possible_type.name in possible_type_names | python | {
"resource": ""
} |
q29995 | byte_adaptor | train | def byte_adaptor(fbuffer):
""" provides py3 compatibility by converting byte based
file stream to string based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer
"""
if six.PY3:
strings = fbuffer.read().decode('latin-1')
fbuffer = six.StringIO(strings)
return fbuffer
else:
return fbuffer | python | {
"resource": ""
} |
q29996 | js_adaptor | train | def js_adaptor(buffer):
""" convert javascript objects like true, none, NaN etc. to
quoted word.
Arguments:
buffer: string to be converted
Returns:
string after conversion
"""
buffer = re.sub('true', 'True', buffer)
buffer = re.sub('false', 'False', buffer)
buffer = re.sub('none', 'None', buffer)
buffer = re.sub('NaN', '"NaN"', buffer)
return buffer | python | {
"resource": ""
} |
q29997 | Nse.get_bhavcopy_url | train | def get_bhavcopy_url(self, d):
"""take date and return bhavcopy url"""
d = parser.parse(d).date()
day_of_month = d.strftime("%d")
mon = d.strftime("%b").upper()
year = d.year
url = self.bhavcopy_base_url % (year, mon, day_of_month, mon, year)
return url | python | {
"resource": ""
} |
q29998 | Nse.download_bhavcopy | train | def download_bhavcopy(self, d):
"""returns bhavcopy as csv file."""
# ex_url = "https://www.nseindia.com/content/historical/EQUITIES/2011/NOV/cm08NOV2011bhav.csv.zip"
url = self.get_bhavcopy_url(d)
filename = self.get_bhavcopy_filename(d)
# response = requests.get(url, headers=self.headers)
response = self.opener.open(Request(url, None, self.headers))
zip_file_handle = io.BytesIO(response.read())
zf = zipfile.ZipFile(zip_file_handle)
try:
result = zf.read(filename)
except KeyError:
result = zf.read(zf.filelist[0].filename)
return result | python | {
"resource": ""
} |
q29999 | _replace_numeric_markers | train | def _replace_numeric_markers(operation, string_parameters):
"""
Replaces qname, format, and numeric markers in the given operation, from
the string_parameters list.
Raises ProgrammingError on wrong number of parameters or bindings
when using qmark. There is no error checking on numeric parameters.
"""
def replace_markers(marker, op, parameters):
param_count = len(parameters)
marker_index = 0
start_offset = 0
while True:
found_offset = op.find(marker, start_offset)
if not found_offset > -1:
break
if marker_index < param_count:
op = op[:found_offset]+op[found_offset:].replace(marker, parameters[marker_index], 1)
start_offset = found_offset + len(parameters[marker_index])
marker_index += 1
else:
raise ProgrammingError("Incorrect number of bindings "
"supplied. The current statement uses "
"%d or more, and there are %d "
"supplied." % (marker_index + 1,
param_count))
if marker_index != 0 and marker_index != param_count:
raise ProgrammingError("Incorrect number of bindings "
"supplied. The current statement uses "
"%d or more, and there are %d supplied." %
(marker_index + 1, param_count))
return op
# replace qmark parameters and format parameters
operation = replace_markers('?', operation, string_parameters)
operation = replace_markers(r'%s', operation, string_parameters)
# replace numbered parameters
# Go through them backwards so smaller numbers don't replace
# parts of larger ones
for index in range(len(string_parameters), 0, -1):
operation = operation.replace(':' + str(index),
string_parameters[index - 1])
return operation | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.