_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q32900
TwistedConnectionProtocol.connectionMade
train
def connectionMade(self):
    """
    Callback function that is called when a connection has succeeded.

    Reaches back to the Connection object and confirms that the connection
    is ready.
    """
    # The owning Connection hangs off a different factory attribute
    # depending on whether the transport was wrapped for TLS.
    try:
        # Non SSL connection
        self.connection = self.transport.connector.factory.conn
    except AttributeError:
        # SSL connection
        self.connection = self.transport.connector.factory.wrappedFactory.conn
    self.connection.client_connection_made(self.transport)
python
{ "resource": "" }
q32901
TwistedConnectionClientFactory.clientConnectionFailed
train
def clientConnectionFailed(self, connector, reason):
    """
    Overridden twisted callback which is called when the
    connection attempt fails.
    """
    log.debug("Connect failed: %s", reason)
    # Mark the Connection as defunct with the underlying failure value.
    self.conn.defunct(reason.value)
python
{ "resource": "" }
q32902
TwistedConnection.add_connection
train
def add_connection(self):
    """
    Convenience function to connect and store the resulting connector.

    :raises ImportError: if SSL options are configured but pyOpenSSL is
        not installed.
    """
    if self.ssl_options:
        if not _HAS_SSL:
            # BUG FIX: the original raised ``ImportError(str(e) + ...)``
            # where ``e`` was never defined, so callers saw a NameError
            # instead of the intended ImportError. Raise a clear message.
            raise ImportError(
                'pyOpenSSL must be installed to enable SSL support with the Twisted event loop')

        self.connector = reactor.connectSSL(
            host=self.endpoint.address, port=self.port,
            factory=TwistedConnectionClientFactory(self),
            contextFactory=_SSLContextFactory(self.ssl_options, self._check_hostname,
                                              self.endpoint.address),
            timeout=self.connect_timeout)
    else:
        self.connector = reactor.connectTCP(
            host=self.endpoint.address, port=self.port,
            factory=TwistedConnectionClientFactory(self),
            timeout=self.connect_timeout)
python
{ "resource": "" }
q32903
TwistedConnection.client_connection_made
train
def client_connection_made(self, transport):
    """
    Called by twisted protocol when a connection attempt has succeeded.
    """
    with self.lock:
        self.is_closed = False
        self.transport = transport
    # Kick off protocol negotiation now that the socket is live.
    self._send_options_message()
python
{ "resource": "" }
q32904
TwistedConnection.close
train
def close(self):
    """
    Disconnect and error-out all requests.
    """
    with self.lock:
        # Idempotent: only the first close proceeds.
        if self.is_closed:
            return
        self.is_closed = True

    log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
    reactor.callFromThread(self.connector.disconnect)
    log.debug("Closed socket to %s", self.endpoint)

    if not self.is_defunct:
        self.error_all_requests(
            ConnectionShutdown("Connection to %s was closed" % self.endpoint))
        # don't leave in-progress operations hanging
        self.connected_event.set()
python
{ "resource": "" }
q32905
MonotonicTimestampGenerator._next_timestamp
train
def _next_timestamp(self, now, last): """ Returns the timestamp that should be used if ``now`` is the current time and ``last`` is the last timestamp returned by this object. Intended for internal and testing use only; to generate timestamps, call an instantiated ``MonotonicTimestampGenerator`` object. :param int now: an integer to be used as the current time, typically representing the current time in microseconds since the UNIX epoch :param int last: an integer representing the last timestamp returned by this object """ if now > last: self.last = now return now else: self._maybe_warn(now=now) self.last = last + 1 return self.last
python
{ "resource": "" }
q32906
BaseModel._get_column_by_db_name
train
def _get_column_by_db_name(cls, name): """ Returns the column, mapped by db_field name """ return cls._columns.get(cls._db_map.get(name, name))
python
{ "resource": "" }
q32907
BaseModel._as_dict
train
def _as_dict(self): """ Returns a map of column names to cleaned values """ values = self._dynamic_columns or {} for name, col in self._columns.items(): values[name] = col.to_database(getattr(self, name, None)) return values
python
{ "resource": "" }
q32908
BaseModel.create
train
def create(cls, **kwargs):
    """
    Create an instance of this model in the database.

    Takes the model column values as keyword arguments. Setting a value to
    `None` is equivalent to running a CQL `DELETE` on that column.

    Returns the instance.
    """
    extra_columns = set(kwargs.keys()) - set(cls._columns.keys())
    # Reject any keyword that does not correspond to a declared column.
    if extra_columns:
        raise ValidationError("Incorrect columns passed: {0}".format(extra_columns))
    return cls.objects.create(**kwargs)
python
{ "resource": "" }
q32909
BaseModel.save
train
def save(self):
    """
    Saves an object to the database.

    .. code-block:: python

        #create a person instance
        person = Person(first_name='Kimberly', last_name='Eggleston')
        #saves it to Cassandra
        person.save()
    """
    # handle polymorphic models
    if self._is_polymorphic:
        if self._is_polymorphic_base:
            raise PolymorphicModelException('cannot save polymorphic base model')
        # stamp the discriminator so the row can be mapped back to this subclass
        setattr(self, self._discriminator_column_name, self.__discriminator_value__)

    self.validate()
    self.__dmlquery__(self.__class__, self,
                      batch=self._batch,
                      ttl=self._ttl,
                      timestamp=self._timestamp,
                      consistency=self.__consistency__,
                      if_not_exists=self._if_not_exists,
                      conditional=self._conditional,
                      timeout=self._timeout,
                      if_exists=self._if_exists).save()

    self._set_persisted()
    # a per-call timestamp only applies to a single statement
    self._timestamp = None
    return self
python
{ "resource": "" }
q32910
BaseModel.update
train
def update(self, **values):
    """
    Performs an update on the model instance. You can pass in values to
    set on the model for updating, or you can call without values to
    execute an update against any modified fields.
    If no fields on the model have been modified since loading, no query
    will be performed. Model validation is performed normally.
    Setting a value to `None` is equivalent to running a CQL `DELETE` on
    that column.

    It is possible to do a blind update, that is, to update a field
    without having first selected the object out of the database.
    See :ref:`Blind Updates <blind_updates>`
    """
    for column_id, v in values.items():
        col = self._columns.get(column_id)
        # check for nonexistant columns
        if col is None:
            raise ValidationError(
                "{0}.{1} has no column named: {2}".format(
                    self.__module__, self.__class__.__name__, column_id))
        # check for primary key update attempts
        if col.is_primary_key:
            current_value = getattr(self, column_id)
            if v != current_value:
                raise ValidationError(
                    "Cannot apply update to primary key '{0}' for {1}.{2}".format(
                        column_id, self.__module__, self.__class__.__name__))
        setattr(self, column_id, v)

    # handle polymorphic models
    if self._is_polymorphic:
        if self._is_polymorphic_base:
            raise PolymorphicModelException('cannot update polymorphic base model')
        setattr(self, self._discriminator_column_name, self.__discriminator_value__)

    self.validate()
    self.__dmlquery__(self.__class__, self,
                      batch=self._batch,
                      ttl=self._ttl,
                      timestamp=self._timestamp,
                      consistency=self.__consistency__,
                      conditional=self._conditional,
                      timeout=self._timeout,
                      if_exists=self._if_exists).update()

    self._set_persisted()
    # a per-call timestamp only applies to a single statement
    self._timestamp = None
    return self
python
{ "resource": "" }
q32911
BaseModel.delete
train
def delete(self):
    """
    Deletes the object from the database
    """
    query = self.__dmlquery__(self.__class__, self,
                              batch=self._batch,
                              timestamp=self._timestamp,
                              consistency=self.__consistency__,
                              timeout=self._timeout,
                              conditional=self._conditional,
                              if_exists=self._if_exists)
    query.delete()
python
{ "resource": "" }
q32912
BaseModel.get_changed_columns
train
def get_changed_columns(self):
    """
    Returns a list of the columns that have been updated since
    instantiation or save
    """
    changed = []
    for name, value_mgr in self._values.items():
        if value_mgr.changed:
            changed.append(name)
    return changed
python
{ "resource": "" }
q32913
ProtocolVersion.get_lower_supported
train
def get_lower_supported(cls, previous_version):
    """
    Return the lower supported protocol version. Beta versions are omitted.
    Returns 0 when no lower non-beta version exists.
    """
    # Walk versions from highest to lowest, returning the first non-beta
    # version strictly below the one supplied.
    for v in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reverse=True):
        if v not in ProtocolVersion.BETA_VERSIONS and v < previous_version:
            return v
    return 0
python
{ "resource": "" }
q32914
BaseClause.update_context
train
def update_context(self, ctx):
    """updates the query context with this clauses values"""
    assert isinstance(ctx, dict)
    # Context keys are the stringified placeholder ids.
    key = str(self.context_id)
    ctx[key] = self.value
python
{ "resource": "" }
q32915
Connection.get_request_id
train
def get_request_id(self):
    """
    Return a free request (stream) id.

    This must be called while self.lock is held.
    """
    # Prefer recycling a previously released id.
    if self.request_ids:
        return self.request_ids.popleft()
    new_request_id = self.highest_request_id + 1
    # in_flight checks should guarantee this
    assert new_request_id <= self.max_request_id
    self.highest_request_id = new_request_id
    return self.highest_request_id
python
{ "resource": "" }
q32916
Connection.register_watcher
train
def register_watcher(self, event_type, callback, register_timeout=None):
    """
    Register a callback for a given event type.
    """
    self._push_watchers[event_type].add(callback)
    # Ask the server to start pushing events of this type.
    message = RegisterMessage(event_list=[event_type])
    self.wait_for_response(message, timeout=register_timeout)
python
{ "resource": "" }
q32917
Metadata.rebuild_token_map
train
def rebuild_token_map(self, partitioner, token_map):
    """
    Rebuild our view of the topology from fresh rows from the system
    topology tables.
    For internal use only.
    """
    self.partitioner = partitioner
    # Map the partitioner class name onto the matching Token type.
    if partitioner.endswith('RandomPartitioner'):
        token_class = MD5Token
    elif partitioner.endswith('Murmur3Partitioner'):
        token_class = Murmur3Token
    elif partitioner.endswith('ByteOrderedPartitioner'):
        token_class = BytesToken
    else:
        # Unknown partitioner: token-aware routing is unavailable.
        self.token_map = None
        return

    token_to_host_owner = {}
    ring = []
    for host, token_strings in six.iteritems(token_map):
        for token_string in token_strings:
            token = token_class.from_string(token_string)
            ring.append(token)
            token_to_host_owner[token] = host

    self.token_map = TokenMap(token_class, token_to_host_owner, sorted(ring), self)
python
{ "resource": "" }
q32918
KeyspaceMetadata.export_as_string
train
def export_as_string(self):
    """
    Returns a CQL query string that can be used to recreate the entire
    keyspace, including user-defined types and tables.
    """
    sections = ([self.as_cql_query() + ';']
                + self.user_type_strings()
                + [f.export_as_string() for f in self.functions.values()]
                + [a.export_as_string() for a in self.aggregates.values()]
                + [t.export_as_string() for t in self.tables.values()])
    cql = "\n\n".join(sections)

    if self._exc_info:
        # Metadata processing failed part-way: emit what we have, commented.
        import traceback
        ret = "/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n" % \
              (self.name)
        for line in traceback.format_exception(*self._exc_info):
            ret += line
        ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % cql
        return ret
    if self.virtual:
        return ("/*\nWarning: Keyspace {ks} is a virtual keyspace and cannot be recreated with CQL.\n"
                "Structure, for reference:*/\n"
                "{cql}\n"
                "").format(ks=self.name, cql=cql)
    return cql
python
{ "resource": "" }
q32919
KeyspaceMetadata.as_cql_query
train
def as_cql_query(self):
    """
    Returns a CQL query string that can be used to recreate just this
    keyspace, not including user-defined types and tables.
    """
    if self.virtual:
        # Virtual keyspaces cannot be recreated; emit a comment instead.
        return "// VIRTUAL KEYSPACE {}".format(protect_name(self.name))
    durable = "true" if self.durable_writes else "false"
    ret = "CREATE KEYSPACE %s WITH replication = %s " % (
        protect_name(self.name),
        self.replication_strategy.export_for_schema())
    return ret + (' AND durable_writes = %s' % durable)
python
{ "resource": "" }
q32920
TableMetadata.is_cql_compatible
train
def is_cql_compatible(self):
    """
    A boolean indicating if this table can be represented as CQL in export
    """
    if self.virtual:
        return False
    comparator = getattr(self, 'comparator', None)
    if comparator:
        # Compact-storage tables with clustering columns may have at most
        # one column beyond the primary key; more than that cannot be
        # expressed in CQL.
        incompatible = (self.is_compact_storage
                        and len(self.columns) > len(self.primary_key) + 1
                        and len(self.clustering_key) >= 1)
        return not incompatible
    return True
python
{ "resource": "" }
q32921
TableMetadata.export_as_string
train
def export_as_string(self):
    """
    Returns a string of CQL queries that can be used to recreate this table
    along with all indexes on it. The returned string is formatted to be
    human readable.
    """
    if self._exc_info:
        # Metadata processing failed: emit an approximate, commented schema.
        import traceback
        ret = "/*\nWarning: Table %s.%s is incomplete because of an error processing metadata.\n" % \
              (self.keyspace_name, self.name)
        for line in traceback.format_exception(*self._exc_info):
            ret += line
        ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql()
        return ret
    if not self.is_cql_compatible:
        # If we can't produce this table with CQL, comment inline
        ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \
              (self.keyspace_name, self.name)
        ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql()
        return ret
    if self.virtual:
        return ('/*\nWarning: Table {ks}.{tab} is a virtual table and cannot be recreated with CQL.\n'
                'Structure, for reference:\n'
                '{cql}\n*/').format(ks=self.keyspace_name, tab=self.name, cql=self._all_as_cql())
    return self._all_as_cql()
python
{ "resource": "" }
q32922
IndexMetadata.as_cql_query
train
def as_cql_query(self):
    """
    Returns a CQL query that can be used to recreate this index.
    """
    options = dict(self.index_options)
    index_target = options.pop("target")
    if self.kind != "CUSTOM":
        return "CREATE INDEX %s ON %s.%s (%s)" % (
            protect_name(self.name),
            protect_name(self.keyspace_name),
            protect_name(self.table_name),
            index_target)

    # Custom indexes carry an implementation class and optional options map.
    class_name = options.pop("class_name")
    ret = "CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % (
        protect_name(self.name),
        protect_name(self.keyspace_name),
        protect_name(self.table_name),
        index_target,
        class_name)
    if options:
        # PYTHON-1008: `ret` will always be a unicode
        opts_cql_encoded = _encoder.cql_encode_all_types(options, as_text_type=True)
        ret += " WITH OPTIONS = %s" % opts_cql_encoded
    return ret
python
{ "resource": "" }
q32923
BytesToken.from_string
train
def from_string(cls, token_string):
    """
    `token_string` should be the string representation from the server.
    """
    # unhexlify works fine with unicode input everywhere but pypy3, where it
    # raises "TypeError: 'str' does not support the buffer interface"
    if isinstance(token_string, six.text_type):
        token_string = token_string.encode('ascii')
    # The BOP stores a hex string
    return cls(unhexlify(token_string))
python
{ "resource": "" }
q32924
EventletConnection.service_timeouts
train
def service_timeouts(cls):
    """
    cls._timeout_watcher runs in this loop forever.
    It is usually waiting for the next timeout on the cls._new_timer Event.
    When new timers are added, that event is set so that the watcher can
    wake up and possibly set an earlier timeout.
    """
    timer_manager = cls._timers
    while True:
        next_end = timer_manager.service_timeouts()
        # Sleep until the next pending timeout, or effectively forever
        # when there are no timers outstanding.
        sleep_time = max(next_end - time.time(), 0) if next_end else 10000
        cls._new_timer.wait(sleep_time)
        cls._new_timer.clear()
python
{ "resource": "" }
q32925
EC2MultiRegionTranslator.translate
train
def translate(self, addr):
    """
    Reverse DNS the public broadcast_address, then lookup that hostname to
    get the AWS-resolved IP, which will point to the private IP address
    within the same datacenter.
    """
    # get family of this address so we translate to the same
    family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0]
    host = socket.getfqdn(addr)
    for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM):
        try:
            return a[4][0]
        except Exception:
            # Best effort: try the next resolved address.
            pass
    # Fall back to the original address if resolution yields nothing usable.
    return addr
python
{ "resource": "" }
q32926
Encoder.cql_encode_float
train
def cql_encode_float(self, val):
    """Encode floats using repr to preserve precision"""
    # Infinity and NaN have dedicated CQL spellings.
    if math.isinf(val):
        return 'Infinity' if val > 0 else '-Infinity'
    if math.isnan(val):
        return 'NaN'
    return repr(val)
python
{ "resource": "" }
q32927
cython_protocol_handler
train
def cython_protocol_handler(colparser):
    """
    Given a column parser to deserialize ResultMessages, return a suitable
    Cython-based protocol handler.

    There are three Cython-based protocol handlers:

        - obj_parser.ListParser
            decodes result messages into a list of tuples

        - obj_parser.LazyParser
            decodes result messages lazily by returning an iterator

        - numpy_parser.NumPyParser
            decodes result messages into NumPy arrays

    The default is to use obj_parser.ListParser
    """
    from cassandra.row_parser import make_recv_results_rows

    class FastResultMessage(ResultMessage):
        """
        Cython version of Result Message that has a faster implementation of
        recv_results_row.
        """
        # type_codes = ResultMessage.type_codes.copy()
        code_to_type = dict((v, k) for k, v in ResultMessage.type_codes.items())
        recv_results_rows = classmethod(make_recv_results_rows(colparser))

    class CythonProtocolHandler(_ProtocolHandler):
        """
        Use FastResultMessage to decode query result message messages.
        """
        my_opcodes = _ProtocolHandler.message_types_by_opcode.copy()
        my_opcodes[FastResultMessage.opcode] = FastResultMessage
        message_types_by_opcode = my_opcodes

        col_parser = colparser

    return CythonProtocolHandler
python
{ "resource": "" }
q32928
_ProtocolHandler.encode_message
train
def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta_protocol_version):
    """
    Encodes a message using the specified frame parameters, and compressor

    :param msg: the message, typically of cassandra.protocol._MessageType,
        generated by the driver
    :param stream_id: protocol stream id for the frame header
    :param protocol_version: version for the frame header, and used encoding contents
    :param compressor: optional compression function to be used on the body
    """
    flags = 0
    body = io.BytesIO()
    if msg.custom_payload:
        # Custom payloads require native protocol v4+.
        if protocol_version < 4:
            raise UnsupportedOperation("Custom key/value payloads can only be used with protocol version 4 or higher")
        flags |= CUSTOM_PAYLOAD_FLAG
        write_bytesmap(body, msg.custom_payload)
    msg.send_body(body, protocol_version)
    body = body.getvalue()

    # Only compress non-empty bodies.
    if compressor and len(body) > 0:
        body = compressor(body)
        flags |= COMPRESSED_FLAG

    if msg.tracing:
        flags |= TRACING_FLAG

    if allow_beta_protocol_version:
        flags |= USE_BETA_FLAG

    buff = io.BytesIO()
    cls._write_header(buff, protocol_version, flags, stream_id, msg.opcode, len(body))
    buff.write(body)

    return buff.getvalue()
python
{ "resource": "" }
q32929
_ProtocolHandler._write_header
train
def _write_header(f, version, flags, stream_id, opcode, length):
    """
    Write a CQL protocol frame header.
    """
    # v3+ headers use a wider stream-id field, hence a different packer.
    pack = v3_header_pack if version >= 3 else header_pack
    f.write(pack(version, flags, stream_id, opcode))
    write_int(f, length)
python
{ "resource": "" }
q32930
_ProtocolHandler.decode_message
train
def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body,
                   decompressor, result_metadata):
    """
    Decodes a native protocol message body

    :param protocol_version: version to use decoding contents
    :param user_type_map: map[keyspace name] = map[type name] = custom type to instantiate when deserializing this type
    :param stream_id: native protocol stream id from the frame header
    :param flags: native protocol flags bitmap from the header
    :param opcode: native protocol opcode from the header
    :param body: frame body
    :param decompressor: optional decompression function to inflate the body
    :return: a message decoded from the body and frame attributes
    """
    if flags & COMPRESSED_FLAG:
        if decompressor is None:
            raise RuntimeError("No de-compressor available for compressed frame!")
        body = decompressor(body)
        flags ^= COMPRESSED_FLAG

    body = io.BytesIO(body)

    # Each optional section appears in a fixed order; clear each flag as
    # its section is consumed so leftovers can be reported below.
    if flags & TRACING_FLAG:
        trace_id = UUID(bytes=body.read(16))
        flags ^= TRACING_FLAG
    else:
        trace_id = None

    if flags & WARNING_FLAG:
        warnings = read_stringlist(body)
        flags ^= WARNING_FLAG
    else:
        warnings = None

    if flags & CUSTOM_PAYLOAD_FLAG:
        custom_payload = read_bytesmap(body)
        flags ^= CUSTOM_PAYLOAD_FLAG
    else:
        custom_payload = None

    flags &= USE_BETA_MASK  # will only be set if we asserted it in connection estabishment

    if flags:
        log.warning("Unknown protocol flags set: %02x. May cause problems.", flags)

    msg_class = cls.message_types_by_opcode[opcode]
    msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata)
    msg.stream_id = stream_id
    msg.trace_id = trace_id
    msg.custom_payload = custom_payload
    msg.warnings = warnings

    if msg.warnings:
        for w in msg.warnings:
            log.warning("Server warning: %s", w)

    return msg
python
{ "resource": "" }
q32931
format_log_context
train
def format_log_context(msg, connection=None, keyspace=None):
    """Format log message to add keyspace and connection context"""
    connection_info = connection or 'DEFAULT_CONNECTION'
    if keyspace:
        return '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info, keyspace, msg)
    return '[Connection: {0}] {1}'.format(connection_info, msg)
python
{ "resource": "" }
q32932
setup
train
def setup(
        hosts,
        default_keyspace,
        consistency=None,
        lazy_connect=False,
        retry_connect=False,
        **kwargs):
    r"""
    Setup a the driver connection used by the mapper

    :param list hosts: list of hosts, (``contact_points`` for :class:`cassandra.cluster.Cluster`)
    :param str default_keyspace: The default keyspace to use
    :param int consistency: The global default :class:`~.ConsistencyLevel` - default is the same as :attr:`.Session.default_consistency_level`
    :param bool lazy_connect: True if should not connect until first use
    :param bool retry_connect: True if we should retry to connect even if there was a connection failure initially
    :param \*\*kwargs: Pass-through keyword arguments for :class:`cassandra.cluster.Cluster`
    """
    from cassandra.cqlengine import models
    models.DEFAULT_KEYSPACE = default_keyspace

    register_connection('default', hosts=hosts, consistency=consistency,
                        lazy_connect=lazy_connect, retry_connect=retry_connect,
                        cluster_options=kwargs, default=True)
python
{ "resource": "" }
q32933
Connection.setup
train
def setup(self):
    """Setup the connection"""
    global cluster, session
    if 'username' in self.cluster_options or 'password' in self.cluster_options:
        raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider")

    if self.lazy_connect:
        # Defer actual connection until first use.
        return

    self.cluster = Cluster(self.hosts, **self.cluster_options)
    try:
        self.session = self.cluster.connect()
        log.debug(format_log_context("connection initialized with internally created session", connection=self.name))
    except NoHostAvailable:
        if self.retry_connect:
            # Fall back to lazy connection and retry on first use.
            log.warning(format_log_context("connect failed, setting up for re-attempt on first use", connection=self.name))
            self.lazy_connect = True
        raise

    if self.consistency is not None:
        self.session.default_consistency_level = self.consistency

    # Keep the module-level default cluster/session in sync when this is
    # the default connection.
    if DEFAULT_CONNECTION in _connections and _connections[DEFAULT_CONNECTION] == self:
        cluster = _connections[DEFAULT_CONNECTION].cluster
        session = _connections[DEFAULT_CONNECTION].session

    self.setup_session()
python
{ "resource": "" }
q32934
run_in_executor
train
def run_in_executor(f):
    """
    A decorator to run the given method in the ThreadPoolExecutor.
    """
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        # Drop the task silently once the owner has been shut down.
        if self.is_shutdown:
            return
        try:
            future = self.executor.submit(f, self, *args, **kwargs)
            future.add_done_callback(_future_completed)
        except Exception:
            log.exception("Failed to submit task to executor")

    return wrapper
python
{ "resource": "" }
q32935
_watch_callback
train
def _watch_callback(obj_weakref, method_name, *args, **kwargs): """ A callback handler for the ControlConnection that tolerates weak references. """ obj = obj_weakref() if obj is None: return getattr(obj, method_name)(*args, **kwargs)
python
{ "resource": "" }
q32936
Cluster.register_user_type
train
def register_user_type(self, keyspace, user_type, klass):
    """
    Registers a class to use to represent a particular user-defined type.
    Query parameters for this user-defined type will be assumed to be
    instances of `klass`.  Result sets for this user-defined type will
    be instances of `klass`.  If no class is registered for a
    user-defined type, a namedtuple will be used for result sets, and
    non-prepared statements may not encode parameters for this type
    correctly.

    `keyspace` is the name of the keyspace that the UDT is defined in.

    `user_type` is the string name of the UDT to register the mapping
    for.

    `klass` should be a class with attributes whose names match the
    fields of the user-defined type.  The constructor must accepts kwargs
    for each of the fields in the UDT.

    This method should only be called after the type has been created
    within Cassandra.

    Example::

        cluster = Cluster(protocol_version=3)
        session = cluster.connect()
        session.set_keyspace('mykeyspace')
        session.execute("CREATE TYPE address (street text, zipcode int)")
        session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)")

        # create a class to map to the "address" UDT
        class Address(object):

            def __init__(self, street, zipcode):
                self.street = street
                self.zipcode = zipcode

        cluster.register_user_type('mykeyspace', 'address', Address)

        # insert a row using an instance of Address
        session.execute("INSERT INTO users (id, location) VALUES (%s, %s)",
                        (0, Address("123 Main St.", 78723)))

        # results will include Address instances
        results = session.execute("SELECT * FROM users")
        row = results[0]
        print row.id, row.location.street, row.location.zipcode
    """
    if self.protocol_version < 3:
        # BUG FIX: the original warning string was garbled (a stray
        # carriage-return entity split the implicit concatenation);
        # reconstructed as one coherent message.
        log.warning("User Type serialization is only supported in native protocol version 3+ (%d in use). "
                    "CQL encoding for simple statements will still work, but named tuples will "
                    "be returned when reading type %s.%s.", self.protocol_version, keyspace, user_type)

    self._user_types[keyspace][user_type] = klass
    for session in tuple(self.sessions):
        session.user_type_registered(keyspace, user_type, klass)
    UserType.evict_udt_class(keyspace, user_type)
python
{ "resource": "" }
q32937
Cluster.connection_factory
train
def connection_factory(self, endpoint, *args, **kwargs):
    """
    Called to create a new connection with proper configuration.
    Intended for internal use only.
    """
    # Merge per-cluster connection settings into the supplied kwargs.
    kwargs = self._make_connection_kwargs(endpoint, kwargs)
    factory = self.connection_class.factory
    return factory(endpoint, self.connect_timeout, *args, **kwargs)
python
{ "resource": "" }
q32938
Cluster.add_host
train
def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_nodes=True):
    """
    Called when adding initial contact points and when the control
    connection subsequently discovers a new node.
    Returns a Host instance, and a flag indicating whether it was new in
    the metadata.
    Intended for internal use only.
    """
    candidate = Host(endpoint, self.conviction_policy_factory, datacenter, rack)
    host, new = self.metadata.add_or_return_host(candidate)
    if new and signal:
        log.info("New Cassandra host %r discovered", host)
        self.on_add(host, refresh_nodes)

    return host, new
python
{ "resource": "" }
q32939
Cluster.remove_host
train
def remove_host(self, host):
    """
    Called when the control connection observes that a node has left the
    ring.  Intended for internal use only.
    """
    if not host:
        return
    # Only signal removal if the host was actually present in metadata.
    if self.metadata.remove_host(host):
        log.info("Cassandra host %s removed", host)
        self.on_remove(host)
python
{ "resource": "" }
q32940
Cluster._ensure_core_connections
train
def _ensure_core_connections(self): """ If any host has fewer than the configured number of core connections open, attempt to open connections until that number is met. """ for session in tuple(self.sessions): for pool in tuple(session._pools.values()): pool.ensure_core_connections()
python
{ "resource": "" }
q32941
Cluster.get_control_connection_host
train
def get_control_connection_host(self):
    """
    Returns the control connection host metadata.
    """
    connection = self.control_connection._connection
    if not connection:
        return None
    endpoint = connection.endpoint
    return self.metadata.get_host(endpoint) if endpoint else None
python
{ "resource": "" }
q32942
Cluster.refresh_schema_metadata
train
def refresh_schema_metadata(self, max_schema_agreement_wait=None):
    """
    Synchronously refresh all schema metadata.

    By default, the timeout for this operation is governed by
    :attr:`~.Cluster.max_schema_agreement_wait` and
    :attr:`~.Cluster.control_connection_timeout`.

    Passing max_schema_agreement_wait here overrides
    :attr:`~.Cluster.max_schema_agreement_wait`.

    Setting max_schema_agreement_wait <= 0 will bypass schema agreement
    and refresh schema immediately.

    An Exception is raised if schema refresh fails for any reason.
    """
    refreshed = self.control_connection.refresh_schema(
        schema_agreement_wait=max_schema_agreement_wait, force=True)
    if not refreshed:
        raise DriverException("Schema metadata was not refreshed. See log for details.")
python
{ "resource": "" }
q32943
Cluster.refresh_keyspace_metadata
train
def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None):
    """
    Synchronously refresh keyspace metadata. This applies to
    keyspace-level information such as replication and durability
    settings. It does not refresh tables, types, etc. contained in the
    keyspace.

    See :meth:`~.Cluster.refresh_schema_metadata` for description of
    ``max_schema_agreement_wait`` behavior
    """
    refreshed = self.control_connection.refresh_schema(
        target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace,
        schema_agreement_wait=max_schema_agreement_wait, force=True)
    if not refreshed:
        raise DriverException("Keyspace metadata was not refreshed. See log for details.")
python
{ "resource": "" }
q32944
Cluster.refresh_table_metadata
train
def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None):
    """
    Synchronously refresh table metadata. This applies to a table, and
    any triggers or indexes attached to the table.

    See :meth:`~.Cluster.refresh_schema_metadata` for description of
    ``max_schema_agreement_wait`` behavior
    """
    refreshed = self.control_connection.refresh_schema(
        target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table,
        schema_agreement_wait=max_schema_agreement_wait, force=True)
    if not refreshed:
        raise DriverException("Table metadata was not refreshed. See log for details.")
python
{ "resource": "" }
q32945
Cluster.refresh_user_type_metadata
train
def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None):
    """
    Synchronously refresh user defined type metadata.

    See :meth:`~.Cluster.refresh_schema_metadata` for description of
    ``max_schema_agreement_wait`` behavior
    """
    refreshed = self.control_connection.refresh_schema(
        target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type,
        schema_agreement_wait=max_schema_agreement_wait, force=True)
    if not refreshed:
        raise DriverException("User Type metadata was not refreshed. See log for details.")
python
{ "resource": "" }
q32946
Cluster.refresh_user_function_metadata
train
def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None):
    """
    Synchronously refresh user defined function metadata.

    ``function`` is a :class:`cassandra.UserFunctionDescriptor`.

    See :meth:`~.Cluster.refresh_schema_metadata` for description of
    ``max_schema_agreement_wait`` behavior
    """
    refreshed = self.control_connection.refresh_schema(
        target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function,
        schema_agreement_wait=max_schema_agreement_wait, force=True)
    if not refreshed:
        raise DriverException("User Function metadata was not refreshed. See log for details.")
python
{ "resource": "" }
q32947
Cluster.refresh_user_aggregate_metadata
train
def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None):
    """
    Synchronously refresh user defined aggregate metadata.

    ``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`.

    See :meth:`~.Cluster.refresh_schema_metadata` for description of
    ``max_schema_agreement_wait`` behavior
    """
    refreshed = self.control_connection.refresh_schema(
        target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate,
        schema_agreement_wait=max_schema_agreement_wait, force=True)
    if not refreshed:
        raise DriverException("User Aggregate metadata was not refreshed. See log for details.")
python
{ "resource": "" }
q32948
Session.execute
train
def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False,
            custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT,
            paging_state=None, host=None):
    """
    Execute the given query and synchronously wait for the response.

    If an error is encountered while executing the query, an Exception
    will be raised.

    `query` may be a query string or an instance of :class:`cassandra.query.Statement`.

    `parameters` may be a sequence or dict of parameters to bind.  If a
    sequence is used, ``%s`` should be used as the placeholder for each
    argument.  If a dict is used, ``%(name)s`` style placeholders must
    be used.

    `timeout` should specify a floating-point timeout (in seconds) after
    which an :exc:`.OperationTimedOut` exception will be raised if the query
    has not completed.  If not set, the timeout defaults to
    :attr:`~.Session.default_timeout`.  If set to :const:`None`, there is
    no timeout. Please see :meth:`.ResponseFuture.result` for details on
    the scope and effect of this timeout.

    If `trace` is set to :const:`True`, the query will be sent with tracing enabled.
    The trace details can be obtained using the returned :class:`.ResultSet` object.

    `custom_payload` is a :ref:`custom_payload` dict to be passed to the server.
    If `query` is a Statement with its own custom_payload, the message payload
    will be a union of the two, with the values specified here taking precedence.

    `execution_profile` is the execution profile to use for this request. It can be
    a key to a profile configured via :meth:`Cluster.add_execution_profile` or an
    instance (from :meth:`Session.execution_profile_clone_update`, for example).

    `paging_state` is an optional paging state, reused from a previous :class:`ResultSet`.

    `host` is the :class:`pool.Host` that should handle the query. Using this is
    discouraged except in a few cases, e.g., querying node-local tables and
    applying schema changes.
    """
    # Delegate to the async path and block on the future for the synchronous API.
    return self.execute_async(query, parameters, trace, custom_payload,
                              timeout, execution_profile, paging_state, host).result()
python
{ "resource": "" }
q32949
Session.get_execution_profile
train
def get_execution_profile(self, name):
    """
    Returns the execution profile associated with the provided ``name``.

    :param name: The name (or key) of the execution profile.
    """
    known = self.cluster.profile_manager.profiles
    if name not in known:
        raise ValueError("Invalid execution_profile: '%s'; valid profiles are %s" % (name, known.keys()))
    return known[name]
python
{ "resource": "" }
q32950
Session.execution_profile_clone_update
train
def execution_profile_clone_update(self, ep, **kwargs):
    """
    Returns a clone of the ``ep`` profile.  ``kwargs`` can be specified to update
    attributes of the returned profile.

    This is a shallow clone, so any objects referenced by the profile are shared.
    This means Load Balancing Policy is maintained by inclusion in the active
    profiles. It also means updating any other rich objects will be seen by the
    active profile. In cases where this is not desirable, be sure to replace the
    instance instead of manipulating the shared object.
    """
    # shallow copy on purpose -- see docstring caveat about shared rich objects
    clone = copy(self._maybe_get_execution_profile(ep))
    for attr, value in kwargs.items():
        setattr(clone, attr, value)
    return clone
python
{ "resource": "" }
q32951
Session.add_request_init_listener
train
def add_request_init_listener(self, fn, *args, **kwargs):
    """
    Adds a callback with arguments to be called when any request is created.

    It will be invoked as `fn(response_future, *args, **kwargs)` after each client
    request is created, and before the request is sent\\*. This can be used to
    create extensions by adding result callbacks to the response future.

    \\* where `response_future` is the :class:`.ResponseFuture` for the request.

    Note that the init callback is done on the client thread creating the request,
    so you may need to consider synchronization if you have multiple threads. Any
    callbacks added to the response future will be executed on the event loop
    thread, so the normal advice about minimizing cycles and avoiding blocking
    apply (see Note in :meth:`.ResponseFuture.add_callbacks`).

    See `this example <https://github.com/datastax/python-driver/blob/master/examples/request_init_listener.py>`_
    in the source tree for an example.
    """
    self._request_init_callbacks.append((fn, args, kwargs))
python
{ "resource": "" }
q32952
Session.remove_request_init_listener
train
def remove_request_init_listener(self, fn, *args, **kwargs):
    """
    Removes a callback and arguments from the list.

    See :meth:`.Session.add_request_init_listener`.
    """
    # entries are stored as (fn, args, kwargs) tuples; remove the exact match
    entry = (fn, args, kwargs)
    self._request_init_callbacks.remove(entry)
python
{ "resource": "" }
q32953
Session.prepare_on_all_hosts
train
def prepare_on_all_hosts(self, query, excluded_host, keyspace=None):
    """
    Prepare the given query on all hosts, excluding ``excluded_host``.
    Intended for internal use only.
    """
    futures = []
    for host in tuple(self._pools.keys()):
        if host != excluded_host and host.is_up:
            future = ResponseFuture(self, PrepareMessage(query=query, keyspace=keyspace),
                                    None, self.default_timeout)

            # we don't care about errors preparing against specific hosts,
            # since we can always prepare them as needed when the prepared
            # statement is used.  Just log errors and continue on.
            try:
                request_id = future._query(host)
            except Exception:
                log.exception("Error preparing query for host %s:", host)
                continue

            if request_id is None:
                # the error has already been logged by ResponseFuture
                log.debug("Failed to prepare query for host %s: %r",
                          host, future._errors.get(host))
                continue

            futures.append((host, future))

    # block on each prepared-statement round trip; failures are best-effort
    for host, future in futures:
        try:
            future.result()
        except Exception:
            log.exception("Error preparing query for host %s:", host)
python
{ "resource": "" }
q32954
Session.shutdown
train
def shutdown(self):
    """
    Close all connections.  ``Session`` instances should not be used
    for any purpose after being shutdown.
    """
    with self._lock:
        if self.is_shutdown:
            return
        else:
            self.is_shutdown = True

    # PYTHON-673. If shutdown was called shortly after session init, avoid
    # a race by cancelling any initial connection attempts that haven't
    # started, then blocking on any that have.
    for future in self._initial_connect_futures:
        future.cancel()
    wait_futures(self._initial_connect_futures)

    for pool in tuple(self._pools.values()):
        pool.shutdown()
python
{ "resource": "" }
q32955
Session.on_down
train
def on_down(self, host):
    """
    Called by the parent Cluster instance when a node is marked down.
    Only intended for internal use.
    """
    future = self.remove_pool(host)
    if future:
        # once the pool is fully removed, rebalance the remaining pools
        future.add_done_callback(lambda f: self.update_created_pools())
python
{ "resource": "" }
q32956
Session._set_keyspace_for_all_pools
train
def _set_keyspace_for_all_pools(self, keyspace, callback):
    """
    Asynchronously sets the keyspace on all pools.  When all
    pools have set all of their connections, `callback` will be
    called with a dictionary of all errors that occurred, keyed
    by the `Host` that they occurred against.
    """
    with self._lock:
        self.keyspace = keyspace
        remaining_callbacks = set(self._pools.values())

    errors = {}

    if not remaining_callbacks:
        callback(errors)
        return

    def pool_finished_setting_keyspace(pool, host_errors):
        remaining_callbacks.remove(pool)
        if host_errors:
            errors[pool.host] = host_errors

        if not remaining_callbacks:
            # BUG FIX: previously passed `host_errors` (only the last pool's
            # errors) instead of the accumulated `errors` dict, breaking the
            # documented contract of keying all errors by Host.
            callback(errors)

    for pool in tuple(self._pools.values()):
        pool._set_keyspace_for_all_conns(keyspace, pool_finished_setting_keyspace)
python
{ "resource": "" }
q32957
Session.user_type_registered
train
def user_type_registered(self, keyspace, user_type, klass):
    """
    Called by the parent Cluster instance when the user registers a new
    mapping from a user-defined type to a class.  Intended for internal
    use only.
    """
    try:
        ks_meta = self.cluster.metadata.keyspaces[keyspace]
    except KeyError:
        raise UserTypeDoesNotExist(
            'Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,))

    try:
        type_meta = ks_meta.user_types[user_type]
    except KeyError:
        raise UserTypeDoesNotExist(
            'User type %s does not exist in keyspace %s' % (user_type, keyspace))

    field_names = type_meta.field_names
    if six.PY2:
        # go from unicode to string to avoid decode errors from implicit
        # decode when formatting non-ascii values
        field_names = [fn.encode('utf-8') for fn in field_names]

    # register a CQL encoder that renders the UDT instance as a CQL map-style
    # literal, one "field : encoded_value" pair per declared field
    def encode(val):
        return '{ %s }' % ' , '.join('%s : %s' % (
            field_name,
            self.encoder.cql_encode_all_types(getattr(val, field_name, None))
        ) for field_name in field_names)

    self.encoder.mapping[klass] = encode
python
{ "resource": "" }
q32958
ControlConnection._get_and_set_reconnection_handler
train
def _get_and_set_reconnection_handler(self, new_handler):
    """
    Called by the _ControlReconnectionHandler when a new connection
    is successfully created.  Clears out the _reconnection_handler on
    this ControlConnection.
    """
    # atomically swap the handler under the lock and hand back the old one
    with self._reconnection_lock:
        previous, self._reconnection_handler = self._reconnection_handler, new_handler
    return previous
python
{ "resource": "" }
q32959
ControlConnection._address_from_row
train
def _address_from_row(self, row):
    """
    Parse the broadcast rpc address from a row and return it untranslated.
    """
    addr = None
    # peers and local rows use different columns; the newer
    # native_transport_address column wins when both are present
    for column in ("rpc_address", "native_transport_address"):
        if column in row:
            addr = row.get(column)
    if not addr or addr in ("0.0.0.0", "::"):
        # a bind-all/blank broadcast address is unusable; fall back to peer
        addr = row.get("peer")
    return addr
python
{ "resource": "" }
q32960
ResponseFuture._on_timeout
train
def _on_timeout(self, _attempts=0): """ Called when the request associated with this ResponseFuture times out. This function may reschedule itself. The ``_attempts`` parameter tracks the number of times this has happened. This parameter should only be set in those cases, where ``_on_timeout`` reschedules itself. """ # PYTHON-853: for short timeouts, we sometimes race with our __init__ if self._connection is None and _attempts < 3: self._timer = self.session.cluster.connection_class.create_timer( 0.01, partial(self._on_timeout, _attempts=_attempts + 1) ) return if self._connection is not None: try: self._connection._requests.pop(self._req_id) # This prevents the race condition of the # event loop thread just receiving the waited message # If it arrives after this, it will be ignored except KeyError: return pool = self.session._pools.get(self._current_host) if pool and not pool.is_shutdown: with self._connection.lock: self._connection.request_ids.append(self._req_id) pool.return_connection(self._connection) errors = self._errors if not errors: if self.is_schema_agreed: key = str(self._current_host.endpoint) if self._current_host else 'no host queried before timeout' errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} else: connection = self.session.cluster.control_connection._connection host = str(connection.endpoint) if connection else 'unknown' errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."} self._set_final_exception(OperationTimedOut(errors, self._current_host))
python
{ "resource": "" }
q32961
ResponseFuture._execute_after_prepare
train
def _execute_after_prepare(self, host, connection, pool, response):
    """
    Handle the response to our attempt to prepare a statement.
    If it succeeded, run the original query again against the same host.
    """
    if pool:
        pool.return_connection(connection)

    if self._final_exception:
        return

    if isinstance(response, ResultMessage):
        if response.kind == RESULT_KIND_PREPARED:
            if self.prepared_statement:
                # result metadata is the only thing that could have
                # changed from an alter
                (_, _, _, self.prepared_statement.result_metadata, new_metadata_id) = response.results
                if new_metadata_id is not None:
                    self.prepared_statement.result_metadata_id = new_metadata_id

            # use self._query to re-use the same host and
            # at the same time properly borrow the connection
            request_id = self._query(host)
            if request_id is None:
                # this host errored out, move on to the next
                self.send_request()
        else:
            self._set_final_exception(ConnectionException(
                "Got unexpected response when preparing statement "
                "on host %s: %s" % (host, response)))
    elif isinstance(response, ErrorMessage):
        if hasattr(response, 'to_exception'):
            self._set_final_exception(response.to_exception())
        else:
            self._set_final_exception(response)
    elif isinstance(response, ConnectionException):
        log.debug("Connection error when preparing statement on host %s: %s",
                  host, response)
        # try again on a different host, preparing again if necessary
        self._errors[host] = response
        self.send_request()
    else:
        self._set_final_exception(ConnectionException(
            "Got unexpected response type when preparing "
            "statement on host %s: %s" % (host, response)))
python
{ "resource": "" }
q32962
ResponseFuture.result
train
def result(self):
    """
    Return the final result or raise an Exception if errors were
    encountered.  If the final result or error has not been set
    yet, this method will block until it is set, or the timeout
    set for the request expires.

    Timeout is specified in the Session request execution functions.
    If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
    This is a client-side timeout. For more information
    about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.

    Example usage::

        >>> future = session.execute_async("SELECT * FROM mycf")
        >>> # do other stuff...

        >>> try:
        ...     rows = future.result()
        ...     for row in rows:
        ...         ... # process results
        ... except Exception:
        ...     log.exception("Operation failed:")
    """
    # blocks until either _final_result or _final_exception is set
    self._event.wait()
    if self._final_result is not _NOT_SET:
        return ResultSet(self, self._final_result)
    else:
        raise self._final_exception
python
{ "resource": "" }
q32963
ResponseFuture.get_query_trace
train
def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE):
    """
    Fetches and returns the query trace of the last response, or `None` if tracing was
    not enabled.

    Note that this may raise an exception if there are problems retrieving the trace
    details from Cassandra. If the trace is not available after `max_wait`,
    :exc:`cassandra.query.TraceUnavailable` will be raised.

    If the ResponseFuture is not done (async execution) and you try to retrieve the
    trace, :exc:`cassandra.query.TraceUnavailable` will be raised.

    `query_cl` is the consistency level used to poll the trace tables.
    """
    # neither a result nor an exception set yet means the future is still pending
    if self._final_result is _NOT_SET and self._final_exception is None:
        raise TraceUnavailable(
            "Trace information was not available. The ResponseFuture is not done.")

    if self._query_traces:
        # return the trace of the most recent page
        return self._get_query_trace(len(self._query_traces) - 1, max_wait, query_cl)
python
{ "resource": "" }
q32964
ResponseFuture.get_all_query_traces
train
def get_all_query_traces(self, max_wait_per=None, query_cl=ConsistencyLevel.LOCAL_ONE):
    """
    Fetches and returns the query traces for all query pages, if tracing was enabled.

    See note in :meth:`~.get_query_trace` regarding possible exceptions.
    """
    if self._query_traces:
        return [self._get_query_trace(i, max_wait_per, query_cl)
                for i in range(len(self._query_traces))]
    return []
python
{ "resource": "" }
q32965
ResponseFuture.add_callback
train
def add_callback(self, fn, *args, **kwargs):
    """
    Attaches a callback function to be called when the final results arrive.

    By default, `fn` will be called with the results as the first and only
    argument.  If `*args` or `**kwargs` are supplied, they will be passed
    through as additional positional or keyword arguments to `fn`.

    If an error is hit while executing the operation, a callback attached
    here will not be called.  Use :meth:`.add_errback()` or :meth:`add_callbacks()`
    if you wish to handle that case.

    If the final result has already been seen when this method is called,
    the callback will be called immediately (before this method returns).

    Note: in the case that the result is not available when the callback is added,
    the callback is executed by IO event thread. This means that the callback
    should not block or attempt further synchronous requests, because no further
    IO will be processed until the callback returns.

    **Important**: if the callback you attach results in an exception being
    raised, **the exception will be ignored**, so please ensure your
    callback handles all error cases that you care about.

    Usage example::

        >>> session = cluster.connect("mykeyspace")

        >>> def handle_results(rows, start_time, should_log=False):
        ...     if should_log:
        ...         log.info("Total time: %f", time.time() - start_time)
        ...     ...

        >>> future = session.execute_async("SELECT * FROM users")
        >>> future.add_callback(handle_results, time.time(), should_log=True)
    """
    run_now = False
    with self._callback_lock:
        # Always add fn to self._callbacks, even when we're about to
        # execute it, to prevent races with functions like
        # start_fetching_next_page that reset _final_result
        self._callbacks.append((fn, args, kwargs))
        if self._final_result is not _NOT_SET:
            run_now = True
    if run_now:
        # invoke outside the lock so the callback can safely touch the future
        fn(self._final_result, *args, **kwargs)
    return self
python
{ "resource": "" }
q32966
ResultSet.was_applied
train
def was_applied(self):
    """
    For LWT results, returns whether the transaction was applied.

    Result is indeterminate if called on a result that was not an LWT request or on
    a :class:`.query.BatchStatement` containing LWT. In the latter case either all
    the batch succeeds or fails.

    Only valid when one of the internal row factories is in use.
    """
    if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory):
        raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,))

    is_batch_statement = isinstance(self.response_future.query, BatchStatement)
    if is_batch_statement and (not self.column_names or self.column_names[0] != "[applied]"):
        raise RuntimeError("No LWT were present in the BatchStatement")

    if not is_batch_statement and len(self.current_rows) != 1:
        raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows)))

    row = self.current_rows[0]
    # tuple-like rows (named_tuple/tuple factories) put [applied] first;
    # dict rows expose it under the literal "[applied]" key
    if isinstance(row, tuple):
        return row[0]
    else:
        return row['[applied]']
python
{ "resource": "" }
q32967
Shutdown._prepair
train
def _prepair(self):
    '''Try to connect to the given dbus services. If successful it will
    return a callable dbus proxy and those arguments.
    '''
    try:
        sessionbus = dbus.SessionBus()
        systembus = dbus.SystemBus()
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers dbus connection failures.
        return (None, None)
    for dbus_props in self.DBUS_SHUTDOWN.values():
        try:
            if dbus_props['bus'] == SESSION_BUS:
                bus = sessionbus
            else:
                bus = systembus
            interface = bus.get_object(dbus_props['service'],
                                       dbus_props['objectPath'])
            proxy = interface.get_dbus_method(dbus_props['method'],
                                              dbus_props['interface'])
            return (proxy, dbus_props['arguments'])
        except dbus.exceptions.DBusException:
            # this particular service is unavailable; try the next candidate
            continue
    return (None, None)
python
{ "resource": "" }
q32968
Shutdown.shutdown
train
def shutdown(self):
    '''Call the dbus proxy to start the shutdown.'''
    if not self._proxy:
        return
    # flush pending filesystem writes before handing control to the proxy
    os.sync()
    self._proxy(*self._args)
python
{ "resource": "" }
q32969
App.on_app_shutdown
train
def on_app_shutdown(self, app):
    '''Dump profile content to disk'''
    # NOTE(review): the docstring mentions dumping the profile, but this body
    # only stops the file watcher and tears down the transfer pages -- confirm
    # whether on_destroy() performs the actual persistence.
    if self.filewatcher:
        self.filewatcher.stop()
    if self.profile:
        self.upload_page.on_destroy()
        self.download_page.on_destroy()
python
{ "resource": "" }
q32970
async_call
train
def async_call(func, *args, callback=None):
    '''Call `func` in background thread, and then call `callback` in Gtk main
    thread.

    If error occurs in `func`, error will keep the traceback and passed to
    `callback` as second parameter. Always check `error` is not None.
    '''
    def do_call():
        result = None
        error = None

        try:
            result = func(*args)
        except Exception:
            # capture the full traceback as a string for the callback
            error = traceback.format_exc()
            logger.error(error)
        if callback:
            # GLib.idle_add marshals the callback onto the Gtk main loop thread
            GLib.idle_add(callback, result, error)

    thread = threading.Thread(target=do_call)
    # daemonize so a hung worker cannot block interpreter shutdown
    thread.daemon = True
    thread.start()
python
{ "resource": "" }
q32971
calculate_legacy_pad_amount
train
def calculate_legacy_pad_amount(H_in, pad_h, k_h, s_h):
    '''
    Compute the (top, bottom) padding amounts along the H-axis needed to
    emulate CoreML's IncludeLastPixel pooling padding with a valid-padding
    pool.  The same function applies to the W-axis.  Intended only for
    pooling conversion.

    :param H_in: input dimension along H-axis
    :param pad_h: padding amount at H-axis
    :param k_h: kernel's H-axis dimension
    :param s_h: stride along H-axis
    :return: (top_padding_amount, bottom_padding_amount)
    '''
    span = H_in + 2 * pad_h - k_h

    # output length when the last, partially-covered window is included
    out_include_last = math.ceil(span / s_h) + 1
    # output length under plain valid padding
    out_valid = math.floor(span / s_h) + 1

    # top always gets the declared padding; for max pooling the padded value
    # should be "-inf", for average pooling zeros
    top = pad_h
    bottom = pad_h
    if out_include_last > out_valid:
        # add extra pixels at the bottom so the last window fits exactly
        bottom += s_h - span % s_h

    # intermediate height with top/bottom padding applied
    adjusted = H_in + top + bottom

    # trim when the original pooling would cut off the final output pixel
    if (out_include_last - 1) * s_h >= H_in + pad_h:
        remainder = adjusted % s_h
        adjusted -= s_h if remainder == 0 else remainder

    return (top, adjusted - top - H_in)
python
{ "resource": "" }
q32972
create_legacy_pad
train
def create_legacy_pad(scope, input_name, output_name, H_in, W_in, k_h, k_w, s_h, s_w, p_h, p_w, padded_value, container): ''' This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of CoreML' pooling under IncludeLastPixel padding mode. :param scope: :param input_name: :param output_name: :param H_in: input dimension along H-axis :param W_in: input dimension along W-axis :param k_h: kernel's H-axis dimension :param k_w: kernel's W-axis dimension :param s_h: stride along H-axis :param s_w: stride along W-axis :param p_h: padding amount at the beginning and the end of H-axis :param p_w: padding amount at the beginning and the end of W-axis :param padded_value: value used to fill padded area :param container: Container object ''' # Add a Pad operator to pre-process 4-D tensor pad_t, pad_b = calculate_legacy_pad_amount(H_in, p_h, k_h, s_h) pad_l, pad_r = calculate_legacy_pad_amount(W_in, p_w, k_w, s_w) # CoreML pooling operator pads only their H- and W-axes. Here we assume the shape of the tensor to be padded # is [N, C, H, W], so we have 8 padding amounts # pads = [N_begin_index, C_begin_index, H_begin_index, W_begin_index, # N_end_index, C_end_index, H_end_index, W_end_index] # Because only H- and W-axes are padded in CoreML, we leave padding amounts of N- and C-axes zeros. pads = [0, 0, pad_t, pad_l, 0, 0, pad_b, pad_r] apply_pad(scope, input_name, output_name, container, pads=pads, value=padded_value)
python
{ "resource": "" }
q32973
_parse_model
train
def _parse_model(topology, scope, model, inputs=None, outputs=None):
    '''
    This is a delegate function of all top-level parsing functions. It does nothing
    but call a proper function to parse the given model.
    '''
    if inputs is None:
        inputs = list()
    if outputs is None:
        outputs = list()

    # dispatch on the CoreML protobuf oneof field describing the model flavor
    model_type = model.WhichOneof('Type')
    if model_type in ['pipeline', 'pipelineClassifier', 'pipelineRegressor']:
        _parse_pipeline_model(topology, scope, model, inputs, outputs)
    elif model_type in ['neuralNetworkClassifier', 'neuralNetworkRegressor', 'neuralNetwork']:
        _parse_neural_network_model(topology, scope, model, inputs, outputs)
    else:
        _parse_simple_model(topology, scope, model, inputs, outputs)
python
{ "resource": "" }
q32974
calculate_lstm_output_shapes
train
def calculate_lstm_output_shapes(operator):
    '''
    See LSTM's conversion function for its output shapes.
    '''
    check_input_and_output_numbers(operator, input_count_range=[1, 3], output_count_range=[1, 3])
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])

    input_shape = operator.inputs[0].type.shape

    if len(input_shape) not in [2, 4]:
        raise RuntimeError('Input must be a 2-D tensor')

    params = operator.raw_operator.uniDirectionalLSTM
    # The following line is more accurate but it may break some tests
    # output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, params.outputVectorSize]
    output_shape = ['None', params.outputVectorSize]
    state_shape = [1, params.outputVectorSize]

    # TODO: Changing input shapes of an operator is dangerous, this should be
    # moved to Topology's _fix_shapes function
    if len(operator.inputs) > 1:
        Y_h_in = operator.inputs[1]  # The initial hidden state of a single sequence
        Y_h_in.type.shape = state_shape
    if len(operator.inputs) > 2:
        Y_c_in = operator.inputs[2]  # The initial cell state of a single sequence
        Y_c_in.type.shape = state_shape

    operator.outputs[0].type.shape = output_shape
    if len(operator.outputs) > 1:
        operator.outputs[1].type.shape = state_shape
    if len(operator.outputs) > 2:
        operator.outputs[2].type.shape = state_shape
python
{ "resource": "" }
q32975
get_xgb_params
train
def get_xgb_params(xgb_node):
    """
    Retrieves parameters of a model.
    """
    if not hasattr(xgb_node, 'kwargs'):
        # XGBoost < 0.7 kept everything directly on the instance
        return xgb_node.__dict__
    # XGBoost >= 0.7 exposes an accessor
    return xgb_node.get_xgb_params()
python
{ "resource": "" }
q32976
_make_tensor_fixed
train
def _make_tensor_fixed(name, data_type, dims, vals, raw=False):
    '''
    Make a TensorProto with specified arguments.  If raw is False, this
    function will choose the corresponding proto field to store the values
    based on data_type. If raw is True, use "raw_data" proto field to store
    the values, and values should be of type bytes in this case.
    '''
    tensor = TensorProto()
    tensor.data_type = data_type
    tensor.name = name

    # complex values are stored as interleaved (real, imag) pairs
    if (data_type == TensorProto.COMPLEX64 or
            data_type == TensorProto.COMPLEX128):
        vals = split_complex_to_pairs(vals)
    if raw:
        tensor.raw_data = vals
    else:
        # pick the typed repeated field matching this dtype's storage type
        field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
            mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
        getattr(tensor, field).extend(vals)

    tensor.dims.extend(dims)
    return tensor
python
{ "resource": "" }
q32977
calculate_linear_classifier_output_shapes
train
def calculate_linear_classifier_output_shapes(operator):
    '''
    This operator maps an input feature vector into a scalar label if the number of
    outputs is one. If two outputs appear in this operator's output list, we should
    further generate a map storing all classes' probabilities.

    Allowed input/output patterns are
        1. [N, C] ---> [N, 1], A sequence of map

    Note that the second case is not allowed as long as ZipMap only produces
    dictionary.
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=[1, 2])
    check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType])

    if len(operator.inputs[0].type.shape) != 2:
        raise RuntimeError('Input must be a [N, C]-tensor')

    N = operator.inputs[0].type.shape[0]

    class_labels = operator.raw_operator.classes_
    if all(isinstance(i, np.ndarray) for i in class_labels):
        class_labels = np.concatenate(class_labels)
    if all(isinstance(i, (six.string_types, six.text_type)) for i in class_labels):
        operator.outputs[0].type = StringTensorType(shape=[N])

        if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC':
            # For multi-class classifier, we produce a map for encoding the
            # probabilities of all classes
            if operator.target_opset < 7:
                operator.outputs[1].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]))
            else:
                operator.outputs[1].type = SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])), N)
        else:
            # For binary LinearSVC, we produce probability of the positive class
            operator.outputs[1].type = FloatTensorType(shape=[N, 1])
    elif all(isinstance(i, (numbers.Real, bool, np.bool_)) for i in class_labels):
        operator.outputs[0].type = Int64TensorType(shape=[N])

        if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC':
            # For multi-class classifier, we produce a map for encoding the
            # probabilities of all classes
            if operator.target_opset < 7:
                operator.outputs[1].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]))
            else:
                operator.outputs[1].type = SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])), N)
        else:
            # For binary LinearSVC, we produce probability of the positive class
            operator.outputs[1].type = FloatTensorType(shape=[N, 1])
    else:
        raise ValueError('Unsupported or mixed label types')
python
{ "resource": "" }
q32978
is_backend_enabled
train
def is_backend_enabled(backend):
    """
    Tells if a backend is enabled.
    """
    if backend != "onnxruntime":
        raise NotImplementedError("Not implemented for backend '{0}'".format(backend))
    # availability is determined purely by whether the package imports
    try:
        import onnxruntime  # noqa: F401
        return True
    except ImportError:
        return False
python
{ "resource": "" }
q32979
calculate_sparkml_string_indexer_output_shapes
train
def calculate_sparkml_string_indexer_output_shapes(operator):
    '''
    This function just copies the input shape to the output because label encoder
    only alters input features' values, not their shape.
    '''
    check_input_and_output_numbers(operator, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[Int64TensorType, StringTensorType])

    # deep-copy so later mutation of the output shape never aliases the input
    input_shape = copy.deepcopy(operator.inputs[0].type.shape)
    operator.outputs[0].type = Int64TensorType(input_shape)
python
{ "resource": "" }
q32980
_post_process_output
train
def _post_process_output(res):
    """
    Applies post processings before running the comparison
    such as changing type from list to arrays.
    """
    if isinstance(res, list):
        if len(res) == 0:
            return res
        elif len(res) == 1:
            # unwrap single-element lists recursively
            return _post_process_output(res[0])
        elif isinstance(res[0], numpy.ndarray):
            return numpy.array(res)
        elif isinstance(res[0], dict):
            import pandas
            return pandas.DataFrame(res).values
        else:
            ls = [len(r) for r in res]
            mi = min(ls)
            if mi != max(ls):
                raise NotImplementedError("Unable to postprocess various number of outputs in [{0}, {1}]".format(min(ls), max(ls)))
            if mi > 1:
                # multiple outputs per row: post-process each column independently
                output = []
                for i in range(mi):
                    output.append(_post_process_output([r[i] for r in res]))
                return output
            elif isinstance(res[0], list):
                # list of lists
                if isinstance(res[0][0], list):
                    return numpy.array(res)
                elif len(res[0]) == 1 and isinstance(res[0][0], dict):
                    return _post_process_output([r[0] for r in res])
                elif len(res) == 1:
                    return res
                else:
                    if len(res[0]) != 1:
                        raise NotImplementedError("Not conversion implemented for {0}".format(res))
                    st = [r[0] for r in res]
                    return numpy.vstack(st)
            else:
                return res
    else:
        return res
python
{ "resource": "" }
q32981
_create_column
train
def _create_column(values, dtype):
    "Creates a column from values with dtype"
    type_name = str(dtype)
    if type_name == "tensor(int64)":
        return numpy.array(values, dtype=numpy.int64)
    if type_name == "tensor(float)":
        return numpy.array(values, dtype=numpy.float32)
    # only the two ONNX element types above are supported
    raise OnnxRuntimeAssertionError("Unable to create one column from dtype '{0}'".format(dtype))
python
{ "resource": "" }
q32982
calculate_gru_output_shapes
train
def calculate_gru_output_shapes(operator):
    '''
    See GRU's conversion function for its output shapes.

    Handles both CoreML GRU and SimpleRNN layers: sets the main output
    shape from the layer's outputVectorSize and, when present, forces the
    initial/final hidden-state variables to [1, outputVectorSize].
    '''
    check_input_and_output_numbers(operator, input_count_range=[1, 2], output_count_range=[1, 2])
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])

    input_shape = operator.inputs[0].type.shape
    # Only [N, C] or [N, C, 1, 1] layouts are accepted.
    if len(input_shape) not in [2, 4]:
        raise RuntimeError('Input must be a [N, C]- or [N, C, 1, 1]-tensor')

    if operator.type == 'gru':
        params = operator.raw_operator.gru
    elif operator.type == 'simpleRecurrent':
        params = operator.raw_operator.simpleRecurrent
    else:
        raise RuntimeError('Only GRU and SimpleRNN are supported')

    # The following line is more accurate but it may break some tests
    # output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [2, params.outputVectorSize]
    # NOTE(review): with sequenceOutput the batch dim is taken from the input;
    # otherwise it is 'None' (and per the trailing comment should arguably be 1).
    output_shape = [input_shape[0] if params.sequenceOutput else 'None', params.outputVectorSize]  # 'None' should be 1
    state_shape = [1, params.outputVectorSize]

    # TODO: Changing input shapes of an operator is dangerous, this should be move to Topology's _fix_shapes function
    if len(operator.inputs) > 1:
        Y_h_in = operator.inputs[1]  # The initial hidden state of a single sequence
        Y_h_in.type.shape = state_shape

    operator.outputs[0].type.shape = output_shape
    if len(operator.outputs) > 1:
        operator.outputs[1].type.shape = state_shape
python
{ "resource": "" }
q32983
Solution.delete_node_nto1
train
def delete_node_nto1(node_list, begin, node, end):  # type: ([],LinkedNode, LinkedNode, LinkedNode)->[]
    """
    delete the node which has n-input and 1-output

    Removes `node` from `node_list`, rewiring its (possibly multiple)
    predecessors `begin` directly to its single successor `end`.
    Returns the mutated `node_list`.
    """
    if begin is None:
        # No explicit predecessors given: take them from the node itself.
        assert node is not None
        begin = node.precedence
    elif not isinstance(begin, list):
        # Normalize a single predecessor to a one-element list.
        begin = [begin]

    if end.in_or_out:
        # if the end is output node, the output name will be kept to avoid the model output name updating.
        for nb_ in begin:
            nb_.out_redirect(node.single_input, node.single_output)
    else:
        for nb_ in begin:
            target_var_name = node.single_input
            # NOTE(review): asserts node's input is produced by each predecessor;
            # assumes the begin nodes all expose it in their outputs — verify for
            # multi-predecessor cases.
            assert target_var_name in nb_.output.values()
            # since the output info never be updated, except the final.
            end.in_redirect(node.single_output, target_var_name)
    # Splice the graph links: predecessors now point at `end`, and `end`
    # inherits the deleted node's predecessors.
    for nb_ in begin:
        nb_.successor = [end if v_ == node else v_ for v_ in nb_.successor]
    end.precedence = [v_ for v_ in end.precedence if v_ != node] + node.precedence

    node_list.remove(node)
    return node_list
python
{ "resource": "" }
q32984
Solution.delete_node_1ton
train
def delete_node_1ton(node_list, begin, node, end):  # type: ([],LinkedNode, LinkedNode, LinkedNode)->[]
    """
    delete the node which has 1-input and n-output

    Removes `node` from `node_list`, rewiring its single predecessor
    `begin` directly to its (possibly multiple) successors `end`.
    Returns the mutated `node_list`.
    """
    if end is None:
        # BUG FIX: the original asserted `end is not None` inside this branch,
        # which always failed; mirror delete_node_nto1 and derive the
        # successors from the node itself.
        assert node is not None
        end = node.successor
    elif not isinstance(end, list):
        # Normalize a single successor to a one-element list.
        end = [end]

    if any(e_.in_or_out for e_ in end):
        # if the end is output node, the output name will be kept to avoid the model output name updating.
        begin.out_redirect(node.single_input, node.single_output)
    else:
        for ne_ in end:
            target_var_name = node.single_input
            # since the output info never be updated, except the final.
            assert target_var_name in begin.output.values()
            ne_.in_redirect(node.single_output, target_var_name)

    # Splice the graph links: `begin` adopts the deleted node's successors,
    # and each successor now points back at `begin`.
    begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor
    for ne_ in end:
        ne_.precedence = [begin if v_ == node else v_ for v_ in ne_.precedence]

    node_list.remove(node)
    return node_list
python
{ "resource": "" }
q32985
Scope.get_onnx_variable_name
train
def get_onnx_variable_name(self, seed):
    '''
    Resolve *seed* to its most recently declared ONNX variable name, or
    mint a brand-new unique name if this is the first time the seed is
    seen in this scope.
    '''
    mapping = self.variable_name_mapping
    if seed not in mapping:
        # First sighting of this seed: create a fresh variable ID.
        return self.get_unique_variable_name(seed)
    # The last entry in the history is the most recent declaration.
    return mapping[seed][-1]
python
{ "resource": "" }
q32986
Scope.find_sink_variables
train
def find_sink_variables(self):
    '''
    Return every variable in this scope that is not consumed by any
    operator (the sinks of the scope-local graph).
    '''
    # Collect the names of all variables feeding some operator; whatever
    # is left over counts as a sink.
    consumed_names = set()
    for operator in self.operators.values():
        for variable in operator.inputs:
            consumed_names.add(variable.onnx_name)
    return [variable for name, variable in self.variables.items()
            if name not in consumed_names]
python
{ "resource": "" }
q32987
Scope.declare_local_variable
train
def declare_local_variable(self, raw_name, type=None, prepend=False):
    '''
    Create a fresh variable in this scope. When raw_name has already been
    used, the new variable is appended to the declaration history so it
    shadows the earlier ones (or, with prepend=True, is inserted first
    and hidden behind them).
    '''
    # Reserve a unique ONNX-level identifier for this declaration.
    onnx_name = self.get_unique_variable_name(raw_name)

    new_variable = Variable(raw_name, onnx_name, self.name, type)
    self.variables[onnx_name] = new_variable

    # Track the declaration history of raw_name; the last entry is what
    # get_onnx_variable_name resolves to.
    history = self.variable_name_mapping.setdefault(raw_name, [])
    if prepend:
        history.insert(0, onnx_name)
    else:
        history.append(onnx_name)
    return new_variable
python
{ "resource": "" }
q32988
Scope.declare_local_operator
train
def declare_local_operator(self, type, raw_model=None):
    '''
    Register a fresh operator of the given type in this scope and
    return it.
    '''
    key = self.get_unique_operator_name(str(type))
    new_operator = Operator(key, self.name, type, raw_model, self.target_opset)
    self.operators[key] = new_operator
    return new_operator
python
{ "resource": "" }
q32989
Scope.delete_local_operator
train
def delete_local_operator(self, onnx_name):
    '''
    Drop the operator registered under *onnx_name* from this scope.

    Raises RuntimeError when no such operator is known.
    '''
    known = onnx_name in self.onnx_operator_names and onnx_name in self.operators
    if not known:
        raise RuntimeError('The operator to be removed not found')
    self.onnx_operator_names.discard(onnx_name)
    del self.operators[onnx_name]
python
{ "resource": "" }
q32990
Scope.delete_local_variable
train
def delete_local_variable(self, onnx_name):
    '''
    Drop the variable registered under *onnx_name* from this scope,
    together with its entry in the raw-name declaration history.

    Raises RuntimeError when no such variable is known.
    '''
    known = onnx_name in self.onnx_variable_names and onnx_name in self.variables
    if not known:
        raise RuntimeError('The variable to be removed not found')
    self.onnx_variable_names.discard(onnx_name)
    # Unlink this declaration from the history kept under its raw name.
    raw = self.variables[onnx_name].raw_name
    self.variable_name_mapping[raw].remove(onnx_name)
    del self.variables[onnx_name]
python
{ "resource": "" }
q32991
Topology.find_root_and_sink_variables
train
def find_root_and_sink_variables(self):
    '''
    Collect the variables of the whole graph that are roots (produced by
    no operator) or sinks (consumed by no operator).
    '''
    produced = set()
    consumed = set()
    for operator in self.unordered_operator_iterator():
        for variable in operator.outputs:
            produced.add(variable.onnx_name)
    for operator in self.unordered_operator_iterator():
        for variable in operator.inputs:
            consumed.add(variable.onnx_name)
    # A variable that no operator produces is a root; one that no
    # operator consumes is a sink. Keep either kind.
    return [variable for scope in self.scopes
            for name, variable in scope.variables.items()
            if name not in produced or name not in consumed]
python
{ "resource": "" }
q32992
Topology.topological_operator_iterator
train
def topological_operator_iterator(self):
    '''
    This is an iterator of all operators in Topology object. Operators may be produced in a topological order.
    If you want to simply go though all operators without considering their topological structure, please use
    another function, unordered_operator_iterator.
    '''
    self._initialize_graph_status_for_traversing()
    # Classifier post-processing ops sort after the default (priority 0)
    # operators among the ready ones, so they are emitted late.
    priorities = {'tensorToProbabilityMap': 2, 'tensorToLabel': 1}
    while not all(operator.is_evaluated for scope in self.scopes for operator in scope.operators.values()):
        is_evaluation_happened = False
        for operator in sorted(self.unordered_operator_iterator(),
                               key=lambda op: priorities[op.type] if op.type in priorities else 0):
            # An operator is ready once all of its inputs have been fed.
            if all(variable.is_fed for variable in operator.inputs) and not operator.is_evaluated:
                # Check if over-writing problem occurs (i.e., multiple operators produce results on one variable).
                for variable in operator.outputs:
                    # Throw an error if this variable has been treated as an output somewhere
                    if variable.is_fed:
                        raise RuntimeError('One variable can only be assigned once')
                    # Mark this variable as filled
                    variable.is_fed = True
                # Make this operator as handled
                operator.is_evaluated = True
                is_evaluation_happened = True
                # Send out an operator
                yield operator
        # After scanning through the whole computational graph, at least one operator should be evaluated. If not,
        # we need to terminate this procedure to avoid dead lock.
        if not is_evaluation_happened:
            break
python
{ "resource": "" }
q32993
Topology._check_structure
train
def _check_structure(self): ''' This function applies some rules to check if the parsed model is proper. Currently, it only checks if isolated variable and isolated operator exists. ''' # Collect all variable names and operator names unused_variables = set() unused_operators = set() for variable in self.unordered_variable_iterator(): unused_variables.add(variable.full_name) for operator in self.unordered_operator_iterator(): unused_operators.add(operator.full_name) for operator in self.unordered_operator_iterator(): for variable in operator.inputs: # A variable is used by an operator, so we remove the variable from the unused-variable list. unused_variables.discard(variable.full_name) # A operator has an input, so we remove the operator from the unused-operator list. unused_operators.discard(operator.full_name) for variable in operator.outputs: # A variable is used by an operator, so we remove the variable from the unused-variable list. unused_variables.discard(variable.full_name) # A operator has an output, so we remove the operator from the unused-operator list. unused_operators.discard(operator.full_name) if len(unused_variables) > 0: raise RuntimeError('Isolated variables exist: %s' % unused_variables) if len(unused_operators) > 0: raise RuntimeError('Isolated operators exist: %s' % unused_operators)
python
{ "resource": "" }
q32994
Topology._initialize_graph_status_for_traversing
train
def _initialize_graph_status_for_traversing(self): ''' Initialize the status of all variables and operators for traversing the underline graph ''' # In the beginning, we set is_root and is_leaf true. For is_fed, we have two different behaviors depending on # whether root_names is empty. for variable in self.unordered_variable_iterator(): # If root_names is set, we only set those variable to be fed. Otherwise, all roots would be fed. if self.root_names: if variable.onnx_name in self.root_names: variable.is_fed = True else: variable.is_fed = False else: variable.is_fed = True variable.is_root = True variable.is_leaf = True # Then, we flip some flags by applying some simple rules so that only # 1. all roots get is_root=True and is_fed=True # 2. all leaves get is_leaf=True for operator in self.unordered_operator_iterator(): operator.is_evaluated = False # All operators are not processed in the beginning for variable in operator.outputs: # Output cannot be fed before graph traversing variable.is_fed = False # If the variable is an output of one operator, it must not be a root variable.is_root = False for variable in operator.inputs: # If the variable is an input of one operator, it must not be a leaf variable.is_leaf = False
python
{ "resource": "" }
q32995
Topology._infer_all_types
train
def _infer_all_types(self):
    '''
    Infer all variables' shapes in the computational graph by seeding the
    roots with user-specified initial types and running each operator's
    shape calculator in topological order.
    '''
    self._initialize_graph_status_for_traversing()

    # Deliver user-specified types to root variables
    for raw_name, initial_type in self.initial_types:
        # Check all variables declared using raw_name in the whole graph
        for scope in self.scopes:
            # Skip scopes without having the considered variable name
            if raw_name not in scope.variable_name_mapping:
                continue
            # Assign initial_type to all variables declared using raw_name
            for onnx_name in scope.variable_name_mapping[raw_name]:
                variable = scope.variables[onnx_name]
                if variable.is_root:
                    # Assign type to the root; existing type produced by parser may be overwritten
                    variable.type = initial_type

    # Traverse the graph from roots to leaves
    for operator in self.topological_operator_iterator():
        if operator.type in self.custom_shape_calculators:
            # User-registered shape calculator takes precedence.
            self.custom_shape_calculators[operator.type](operator)
        elif operator.type in self.custom_conversion_functions:
            pass  # in Keras converter, the shape calculator can be optional.
        else:
            operator.infer_types()
python
{ "resource": "" }
q32996
Topology._resolve_duplicates
train
def _resolve_duplicates(self):
    '''
    Merge variables connected by identity operator to reduce the number of redundant variables
    '''
    self._initialize_graph_status_for_traversing()

    # Traverse the graph from roots to leaves
    for operator in self.topological_operator_iterator():
        if operator.type != 'identity':
            continue

        # An identity bridging a graph input to a graph output must be kept,
        # otherwise the model-level names would be lost.
        if any(variable.is_root for variable in operator.inputs) and \
                any(variable.is_leaf for variable in operator.outputs):
            continue

        # Replace the output variable with the input variable everywhere
        original = operator.inputs[0]
        duplicate = operator.outputs[0]
        for another_scope in self.scopes:
            for another_operator in another_scope.operators.values():
                for i in range(len(another_operator.inputs)):
                    if another_operator.inputs[i].onnx_name != duplicate.onnx_name:
                        continue
                    another_operator.inputs[i] = original

        # When original variable's documentation string or denotation is empty but duplicate's is not, we
        # copy that field to the original variable to avoid information loss.
        if not original.type.doc_string and duplicate.type.doc_string:
            original.type.doc_string = duplicate.type.doc_string

        if isinstance(original.type, TensorType) and isinstance(duplicate.type, TensorType):
            if not original.type.denotation and duplicate.type.denotation:
                original.type.denotation = duplicate.type.denotation
            if not original.type.channel_denotations:
                original.type.channel_denotations = duplicate.type.channel_denotations
            elif duplicate.type.channel_denotations:
                # Merge the channel denotations if available in both the original and the duplicate
                for i in range(len(original.type.channel_denotations)):
                    if original.type.channel_denotations[i]:
                        continue
                    original.type.channel_denotations[i] = duplicate.type.channel_denotations[i]

        # Sometime, shapes of duplicates are different. We try to replace the original variable's unknown dimensions
        # as many as possible because we will get rid of the duplicate.
        if len(original.type.shape) == len(duplicate.type.shape):
            for i in range(len(original.type.shape)):
                if original.type.shape[i] != 'None':
                    continue
                original.type.shape[i] = duplicate.type.shape[i]

        # Because we're iterating through the topology, we cannot delete any operator or variable. Otherwise,
        # the traversing function may be broken. We will delete those abandoned ones later.
        duplicate.is_abandoned = True
        operator.is_abandoned = True

    for scope in self.scopes:
        # Find out who is going to be abandoned
        abandoned_operator_names = set(onnx_name for onnx_name, operator in scope.operators.items()
                                       if operator.is_abandoned)
        abandoned_variable_names = set(onnx_name for onnx_name, variable in scope.variables.items()
                                       if variable.is_abandoned)

        # Remove abandoned operators
        for name in abandoned_operator_names:
            scope.delete_local_operator(name)

        # Remove abandoned variables
        for name in abandoned_variable_names:
            scope.delete_local_variable(name)
python
{ "resource": "" }
q32997
Topology.compile
train
def compile(self):
    '''
    Give every operator enough information so that each operator
    conversion can happen independently; along the way, check, fix and
    simplify the network structure.
    '''
    # The order matters: pruning and identity de-duplication must run
    # before shape fixing and type inference; the structure check is last.
    for stage in (self._prune,
                  self._resolve_duplicates,
                  self._fix_shapes,
                  self._infer_all_types,
                  self._check_structure):
        stage()
python
{ "resource": "" }
q32998
convert_tensor_to_probability_map
train
def convert_tensor_to_probability_map(scope, operator, container):
    '''
    This converter tries to convert a special operator 'TensorToProbabilityMap' into a sequence of some ONNX
    operators. Those operators are used to create a dictionary in which keys are class labels and values are the
    associated probabilities. We assume that the elements in the given probability tensor are aligned with the class
    labels specified in the CoreML model.

    Notice that ONNX<1.2 doesn't support a CoreML classifier with a batch size larger than one because old ONNX
    ZipMap is not able to produce a sequence of dictionaries. This issue has been fixed in ONNX-1.2.
    '''
    attrs = {'name': scope.get_unique_operator_name('ZipMap')}

    # Pull the class labels (string or int64) out of the CoreML model so
    # ZipMap can pair them with the probabilities.
    model_type = operator.raw_operator.WhichOneof('Type')
    if model_type == 'neuralNetworkClassifier':
        model = operator.raw_operator.neuralNetworkClassifier
        if model.WhichOneof('ClassLabels') == 'stringClassLabels':
            attrs['classlabels_strings'] = list(s.encode('utf-8') for s in model.stringClassLabels.vector)
        elif model.WhichOneof('ClassLabels') == 'int64ClassLabels':
            attrs['classlabels_int64s'] = list(int(i) for i in model.int64ClassLabels.vector)
        else:
            raise ValueError('Unknown label type found')
    elif model_type == 'pipelineClassifier':
        model = operator.raw_operator.pipelineClassifier
        if model.WhichOneof('ClassLabels') == 'stringClassLabels':
            attrs['classlabels_strings'] = list(s.encode('utf-8') for s in model.stringClassLabels.vector)
        elif model.WhichOneof('ClassLabels') == 'int64ClassLabels':
            attrs['classlabels_int64s'] = list(int(i) for i in model.int64ClassLabels.vector)
        else:
            raise ValueError('Unknown label type found')
    else:
        raise TypeError('Only neural network classifiers and pipeline classifiers are supported')

    input_shape = operator.inputs[0].type.shape
    if len(operator.inputs[0].type.shape) != 2:
        # Calculate the shape attribute of ONNX Reshape
        if input_shape[0] != 'None':
            N = input_shape[0]
        else:
            N = -1  # -1 means that this dimension is automatically determined in runtime and unknown in conversion time

        if all(isinstance(i, numbers.Integral) for i in input_shape[1:]):
            C = 1
            for i in input_shape[1:]:
                C *= int(i)
        else:
            C = -1  # -1 means that this dimension is automatically determined in runtime and unknown in conversion time

        # ZipMap in ONNX only accepts [C] and [N, C] inputs. In cases of [N, C, 1, 1], we reshape the probability tensor
        # into [N, C] before feeding it into ZipMap.
        buffer_name = scope.get_unique_variable_name('buffer')
        apply_reshape(scope, operator.inputs[0].full_name, buffer_name, container, desired_shape=[N, C])
    else:
        buffer_name = operator.inputs[0].full_name

    container.add_node('ZipMap', buffer_name, operator.outputs[0].full_name, op_domain='ai.onnx.ml', **attrs)
python
{ "resource": "" }
q32999
calculate_bidirectional_lstm_output_shapes
train
def calculate_bidirectional_lstm_output_shapes(operator):
    '''
    See bidirectional LSTM's conversion function for its output shapes.

    Sets the main output to [ 'None', 2 * outputVectorSize ] (forward and
    backward halves concatenated) and forces every optional hidden/cell
    state variable, in both directions, to [1, outputVectorSize].
    '''
    check_input_and_output_numbers(operator, input_count_range=[1, 5], output_count_range=[1, 5])
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])

    input_shape = operator.inputs[0].type.shape

    # LSTM accepts [N, C] and [N, C, 1, 1] inputs
    if len(input_shape) not in [2, 4]:
        raise RuntimeError('Input must be a 2-D or 4-D tensor')

    params = operator.raw_operator.biDirectionalLSTM
    # The following line is more accurate but it may break some tests
    # output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, 2 *params.outputVectorSize]
    output_shape = ['None', 2 * params.outputVectorSize]
    state_shape = [1, params.outputVectorSize]

    # TODO: Changing input shapes of an operator is dangerous, this should be move to Topology's _fix_shapes function
    if len(operator.inputs) > 1:
        Y_h_in = operator.inputs[1]  # The forward initial hidden state of a single sequence
        Y_h_in.type.shape = state_shape
        Y_h_rev_in = operator.inputs[3]  # The backward initial hidden state of a single sequence
        Y_h_rev_in.type.shape = state_shape
    if len(operator.inputs) > 2:
        Y_c_in = operator.inputs[2]  # The forward initial cell state of a single sequence
        Y_c_in.type.shape = state_shape
        Y_c_rev_in = operator.inputs[4]  # The backward initial cell state of a single sequence
        Y_c_rev_in.type.shape = state_shape

    operator.outputs[0].type.shape = output_shape

    # NOTE(review): indices 3/4 are the backward states; presence of output 1
    # is assumed to imply presence of 3 (and 2 to imply 4) — mirrors the
    # input handling above.
    if len(operator.outputs) > 1:
        operator.outputs[1].type.shape = state_shape
        operator.outputs[3].type.shape = state_shape
    if len(operator.outputs) > 2:
        operator.outputs[2].type.shape = state_shape
        operator.outputs[4].type.shape = state_shape
python
{ "resource": "" }