Dataset columns:
repository_name: string (length 7 to 55)
func_path_in_repository: string (length 4 to 223)
func_name: string (length 1 to 134)
whole_func_string: string (length 75 to 104k)
language: string (1 class)
func_code_string: string (length 75 to 104k)
func_code_tokens: list (length 19 to 28.4k)
func_documentation_string: string (length 1 to 46.9k)
func_documentation_tokens: list (length 1 to 1.97k)
split_name: string (1 class)
func_code_url: string (length 87 to 315)
sonyxperiadev/pygerrit
pygerrit/stream.py
GerritStream.run
def run(self):
    """ Listen to the stream and send events to the client. """
    channel = self._ssh_client.get_transport().open_session()
    self._channel = channel
    channel.exec_command("gerrit stream-events")
    stdout = channel.makefile()
    stderr = channel.makefile_stderr()
    while not self._stop.is_set():
        try:
            if channel.exit_status_ready():
                if channel.recv_stderr_ready():
                    error = stderr.readline().strip()
                else:
                    error = "Remote server connection closed"
                self._error_event(error)
                self._stop.set()
            else:
                data = stdout.readline()
                self._gerrit.put_event(data)
        except Exception as e:  # pylint: disable=W0703
            self._error_event(repr(e))
            self._stop.set()
python
[ "def", "run", "(", "self", ")", ":", "channel", "=", "self", ".", "_ssh_client", ".", "get_transport", "(", ")", ".", "open_session", "(", ")", "self", ".", "_channel", "=", "channel", "channel", ".", "exec_command", "(", "\"gerrit stream-events\"", ")", "stdout", "=", "channel", ".", "makefile", "(", ")", "stderr", "=", "channel", ".", "makefile_stderr", "(", ")", "while", "not", "self", ".", "_stop", ".", "is_set", "(", ")", ":", "try", ":", "if", "channel", ".", "exit_status_ready", "(", ")", ":", "if", "channel", ".", "recv_stderr_ready", "(", ")", ":", "error", "=", "stderr", ".", "readline", "(", ")", ".", "strip", "(", ")", "else", ":", "error", "=", "\"Remote server connection closed\"", "self", ".", "_error_event", "(", "error", ")", "self", ".", "_stop", ".", "set", "(", ")", "else", ":", "data", "=", "stdout", ".", "readline", "(", ")", "self", ".", "_gerrit", ".", "put_event", "(", "data", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=W0703", "self", ".", "_error_event", "(", "repr", "(", "e", ")", ")", "self", ".", "_stop", ".", "set", "(", ")" ]
Listen to the stream and send events to the client.
[ "Listen", "to", "the", "stream", "and", "send", "events", "to", "the", "client", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/stream.py#L56-L77
sonyxperiadev/pygerrit
pygerrit/client.py
GerritClient.run_command
def run_command(self, command):
    """ Run a command.

    :arg str command: The command to run.
    :Return: The result as a string.
    :Raises: `ValueError` if `command` is not a string.

    """
    if not isinstance(command, basestring):
        raise ValueError("command must be a string")
    return self._ssh_client.run_gerrit_command(command)
python
[ "def", "run_command", "(", "self", ",", "command", ")", ":", "if", "not", "isinstance", "(", "command", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"command must be a string\"", ")", "return", "self", ".", "_ssh_client", ".", "run_gerrit_command", "(", "command", ")" ]
Run a command. :arg str command: The command to run. :Return: The result as a string. :Raises: `ValueError` if `command` is not a string.
[ "Run", "a", "command", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L79-L91
sonyxperiadev/pygerrit
pygerrit/client.py
GerritClient.query
def query(self, term):
    """ Run a query.

    :arg str term: The query term to run.
    :Returns: A list of results as :class:`pygerrit.models.Change` objects.
    :Raises: `ValueError` if `term` is not a string.

    """
    results = []
    command = ["query", "--current-patch-set", "--all-approvals",
               "--format JSON", "--commit-message"]
    if not isinstance(term, basestring):
        raise ValueError("term must be a string")
    command.append(escape_string(term))
    result = self._ssh_client.run_gerrit_command(" ".join(command))
    decoder = JSONDecoder()
    for line in result.stdout.read().splitlines():
        # Gerrit's response to the query command contains one or more
        # lines of JSON-encoded strings.  The last one is a status
        # dictionary containing the key "type" whose value indicates
        # whether or not the operation was successful.
        # According to http://goo.gl/h13HD it should be safe to use the
        # presence of the "type" key to determine whether the dictionary
        # represents a change or if it's the query status indicator.
        try:
            data = decoder.decode(line)
        except ValueError as err:
            raise GerritError("Query returned invalid data: %s", err)
        if "type" in data and data["type"] == "error":
            raise GerritError("Query error: %s" % data["message"])
        elif "project" in data:
            results.append(Change(data))
    return results
python
[ "def", "query", "(", "self", ",", "term", ")", ":", "results", "=", "[", "]", "command", "=", "[", "\"query\"", ",", "\"--current-patch-set\"", ",", "\"--all-approvals\"", ",", "\"--format JSON\"", ",", "\"--commit-message\"", "]", "if", "not", "isinstance", "(", "term", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"term must be a string\"", ")", "command", ".", "append", "(", "escape_string", "(", "term", ")", ")", "result", "=", "self", ".", "_ssh_client", ".", "run_gerrit_command", "(", "\" \"", ".", "join", "(", "command", ")", ")", "decoder", "=", "JSONDecoder", "(", ")", "for", "line", "in", "result", ".", "stdout", ".", "read", "(", ")", ".", "splitlines", "(", ")", ":", "# Gerrit's response to the query command contains one or more", "# lines of JSON-encoded strings. The last one is a status", "# dictionary containing the key \"type\" whose value indicates", "# whether or not the operation was successful.", "# According to http://goo.gl/h13HD it should be safe to use the", "# presence of the \"type\" key to determine whether the dictionary", "# represents a change or if it's the query status indicator.", "try", ":", "data", "=", "decoder", ".", "decode", "(", "line", ")", "except", "ValueError", "as", "err", ":", "raise", "GerritError", "(", "\"Query returned invalid data: %s\"", ",", "err", ")", "if", "\"type\"", "in", "data", "and", "data", "[", "\"type\"", "]", "==", "\"error\"", ":", "raise", "GerritError", "(", "\"Query error: %s\"", "%", "data", "[", "\"message\"", "]", ")", "elif", "\"project\"", "in", "data", ":", "results", ".", "append", "(", "Change", "(", "data", ")", ")", "return", "results" ]
Run a query. :arg str term: The query term to run. :Returns: A list of results as :class:`pygerrit.models.Change` objects. :Raises: `ValueError` if `term` is not a string.
[ "Run", "a", "query", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L93-L129
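A minimal usage sketch for the query method above; the host name and query term are illustrative assumptions, not part of this record:

from pygerrit.client import GerritClient

client = GerritClient("review.example.com")        # hypothetical Gerrit host
open_changes = client.query("status:open limit:5")
for change in open_changes:                         # pygerrit.models.Change instances
    print(change)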
sonyxperiadev/pygerrit
pygerrit/client.py
GerritClient.start_event_stream
def start_event_stream(self):
    """ Start streaming events from `gerrit stream-events`. """
    if not self._stream:
        self._stream = GerritStream(self, ssh_client=self._ssh_client)
        self._stream.start()
python
[ "def", "start_event_stream", "(", "self", ")", ":", "if", "not", "self", ".", "_stream", ":", "self", ".", "_stream", "=", "GerritStream", "(", "self", ",", "ssh_client", "=", "self", ".", "_ssh_client", ")", "self", ".", "_stream", ".", "start", "(", ")" ]
Start streaming events from `gerrit stream-events`.
[ "Start", "streaming", "events", "from", "gerrit", "stream", "-", "events", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L131-L135
sonyxperiadev/pygerrit
pygerrit/client.py
GerritClient.stop_event_stream
def stop_event_stream(self):
    """ Stop streaming events from `gerrit stream-events`."""
    if self._stream:
        self._stream.stop()
        self._stream.join()
        self._stream = None
        with self._events.mutex:
            self._events.queue.clear()
python
[ "def", "stop_event_stream", "(", "self", ")", ":", "if", "self", ".", "_stream", ":", "self", ".", "_stream", ".", "stop", "(", ")", "self", ".", "_stream", ".", "join", "(", ")", "self", ".", "_stream", "=", "None", "with", "self", ".", "_events", ".", "mutex", ":", "self", ".", "_events", ".", "queue", ".", "clear", "(", ")" ]
Stop streaming events from `gerrit stream-events`.
[ "Stop", "streaming", "events", "from", "gerrit", "stream", "-", "events", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L137-L144
sonyxperiadev/pygerrit
pygerrit/client.py
GerritClient.get_event
def get_event(self, block=True, timeout=None):
    """ Get the next event from the queue.

    :arg boolean block: Set to True to block if no event is available.
    :arg seconds timeout: Timeout to wait if no event is available.

    :Returns: The next event as a :class:`pygerrit.events.GerritEvent`
        instance, or `None` if:
        - `block` is False and there is no event available in the queue, or
        - `block` is True and no event is available within the time
          specified by `timeout`.

    """
    try:
        return self._events.get(block, timeout)
    except Empty:
        return None
python
[ "def", "get_event", "(", "self", ",", "block", "=", "True", ",", "timeout", "=", "None", ")", ":", "try", ":", "return", "self", ".", "_events", ".", "get", "(", "block", ",", "timeout", ")", "except", "Empty", ":", "return", "None" ]
Get the next event from the queue. :arg boolean block: Set to True to block if no event is available. :arg seconds timeout: Timeout to wait if no event is available. :Returns: The next event as a :class:`pygerrit.events.GerritEvent` instance, or `None` if: - `block` is False and there is no event available in the queue, or - `block` is True and no event is available within the time specified by `timeout`.
[ "Get", "the", "next", "event", "from", "the", "queue", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L146-L162
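The stream and queue methods above are meant to be used together; a polling-loop sketch, with the host name again assumed:

client = GerritClient("review.example.com")        # hypothetical Gerrit host
client.start_event_stream()                        # events arrive on a background thread
try:
    while True:
        event = client.get_event(block=True, timeout=10)
        if event is None:
            continue                               # no event within the timeout
        print(event)
finally:
    client.stop_event_stream()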
sonyxperiadev/pygerrit
pygerrit/client.py
GerritClient.put_event
def put_event(self, data):
    """ Create event from `data` and add it to the queue.

    :arg json data: The JSON data from which to create the event.

    :Raises: :class:`pygerrit.error.GerritError` if the queue is full, or
        the factory could not create the event.

    """
    try:
        event = self._factory.create(data)
        self._events.put(event)
    except Full:
        raise GerritError("Unable to add event: queue is full")
python
[ "def", "put_event", "(", "self", ",", "data", ")", ":", "try", ":", "event", "=", "self", ".", "_factory", ".", "create", "(", "data", ")", "self", ".", "_events", ".", "put", "(", "event", ")", "except", "Full", ":", "raise", "GerritError", "(", "\"Unable to add event: queue is full\"", ")" ]
Create event from `data` and add it to the queue. :arg json data: The JSON data from which to create the event. :Raises: :class:`pygerrit.error.GerritError` if the queue is full, or the factory could not create the event.
[ "Create", "event", "from", "data", "and", "add", "it", "to", "the", "queue", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L164-L177
sonyxperiadev/pygerrit
pygerrit/ssh.py
_extract_version
def _extract_version(version_string, pattern):
    """ Extract the version from `version_string` using `pattern`.

    Return the version as a string, with leading/trailing whitespace
    stripped.

    """
    if version_string:
        match = pattern.match(version_string.strip())
        if match:
            return match.group(1)
    return ""
python
[ "def", "_extract_version", "(", "version_string", ",", "pattern", ")", ":", "if", "version_string", ":", "match", "=", "pattern", ".", "match", "(", "version_string", ".", "strip", "(", ")", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1", ")", "return", "\"\"" ]
Extract the version from `version_string` using `pattern`. Return the version as a string, with leading/trailing whitespace stripped.
[ "Extract", "the", "version", "from", "version_string", "using", "pattern", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L36-L47
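A quick illustration of _extract_version, reusing the pattern from get_remote_version further down; the sample version output is an assumption:

import re

pattern = re.compile(r'^gerrit version (.*)$')
print(_extract_version("gerrit version 2.11.4\n", pattern))   # -> "2.11.4"
print(_extract_version("", pattern))                          # -> ""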
sonyxperiadev/pygerrit
pygerrit/ssh.py
GerritSSHClient._configure
def _configure(self):
    """ Configure the ssh parameters from the config file. """
    configfile = expanduser("~/.ssh/config")
    if not isfile(configfile):
        raise GerritError("ssh config file '%s' does not exist" %
                          configfile)
    config = SSHConfig()
    config.parse(open(configfile))
    data = config.lookup(self.hostname)
    if not data:
        raise GerritError("No ssh config for host %s" % self.hostname)
    if 'hostname' not in data or 'port' not in data or 'user' not in data:
        raise GerritError("Missing configuration data in %s" % configfile)
    self.hostname = data['hostname']
    self.username = data['user']
    if 'identityfile' in data:
        key_filename = abspath(expanduser(data['identityfile'][0]))
        if not isfile(key_filename):
            raise GerritError("Identity file '%s' does not exist" %
                              key_filename)
        self.key_filename = key_filename
    try:
        self.port = int(data['port'])
    except ValueError:
        raise GerritError("Invalid port: %s" % data['port'])
    if 'proxycommand' in data:
        self.proxy = ProxyCommand(data['proxycommand'])
python
[ "def", "_configure", "(", "self", ")", ":", "configfile", "=", "expanduser", "(", "\"~/.ssh/config\"", ")", "if", "not", "isfile", "(", "configfile", ")", ":", "raise", "GerritError", "(", "\"ssh config file '%s' does not exist\"", "%", "configfile", ")", "config", "=", "SSHConfig", "(", ")", "config", ".", "parse", "(", "open", "(", "configfile", ")", ")", "data", "=", "config", ".", "lookup", "(", "self", ".", "hostname", ")", "if", "not", "data", ":", "raise", "GerritError", "(", "\"No ssh config for host %s\"", "%", "self", ".", "hostname", ")", "if", "'hostname'", "not", "in", "data", "or", "'port'", "not", "in", "data", "or", "'user'", "not", "in", "data", ":", "raise", "GerritError", "(", "\"Missing configuration data in %s\"", "%", "configfile", ")", "self", ".", "hostname", "=", "data", "[", "'hostname'", "]", "self", ".", "username", "=", "data", "[", "'user'", "]", "if", "'identityfile'", "in", "data", ":", "key_filename", "=", "abspath", "(", "expanduser", "(", "data", "[", "'identityfile'", "]", "[", "0", "]", ")", ")", "if", "not", "isfile", "(", "key_filename", ")", ":", "raise", "GerritError", "(", "\"Identity file '%s' does not exist\"", "%", "key_filename", ")", "self", ".", "key_filename", "=", "key_filename", "try", ":", "self", ".", "port", "=", "int", "(", "data", "[", "'port'", "]", ")", "except", "ValueError", ":", "raise", "GerritError", "(", "\"Invalid port: %s\"", "%", "data", "[", "'port'", "]", ")", "if", "'proxycommand'", "in", "data", ":", "self", ".", "proxy", "=", "ProxyCommand", "(", "data", "[", "'proxycommand'", "]", ")" ]
Configure the ssh parameters from the config file.
[ "Configure", "the", "ssh", "parameters", "from", "the", "config", "file", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L84-L111
sonyxperiadev/pygerrit
pygerrit/ssh.py
GerritSSHClient._do_connect
def _do_connect(self):
    """ Connect to the remote. """
    self.load_system_host_keys()
    if self.username is None or self.port is None:
        self._configure()
    try:
        self.connect(hostname=self.hostname,
                     port=self.port,
                     username=self.username,
                     key_filename=self.key_filename,
                     sock=self.proxy)
    except socket.error as e:
        raise GerritError("Failed to connect to server: %s" % e)
    try:
        version_string = self._transport.remote_version
        pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
        self.remote_version = _extract_version(version_string, pattern)
    except AttributeError:
        self.remote_version = None
python
[ "def", "_do_connect", "(", "self", ")", ":", "self", ".", "load_system_host_keys", "(", ")", "if", "self", ".", "username", "is", "None", "or", "self", ".", "port", "is", "None", ":", "self", ".", "_configure", "(", ")", "try", ":", "self", ".", "connect", "(", "hostname", "=", "self", ".", "hostname", ",", "port", "=", "self", ".", "port", ",", "username", "=", "self", ".", "username", ",", "key_filename", "=", "self", ".", "key_filename", ",", "sock", "=", "self", ".", "proxy", ")", "except", "socket", ".", "error", "as", "e", ":", "raise", "GerritError", "(", "\"Failed to connect to server: %s\"", "%", "e", ")", "try", ":", "version_string", "=", "self", ".", "_transport", ".", "remote_version", "pattern", "=", "re", ".", "compile", "(", "r'^.*GerritCodeReview_([a-z0-9-\\.]*) .*$'", ")", "self", ".", "remote_version", "=", "_extract_version", "(", "version_string", ",", "pattern", ")", "except", "AttributeError", ":", "self", ".", "remote_version", "=", "None" ]
Connect to the remote.
[ "Connect", "to", "the", "remote", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L113-L132
sonyxperiadev/pygerrit
pygerrit/ssh.py
GerritSSHClient._connect
def _connect(self):
    """ Connect to the remote if not already connected. """
    if not self.connected.is_set():
        try:
            self.lock.acquire()
            # Another thread may have connected while we were
            # waiting to acquire the lock
            if not self.connected.is_set():
                self._do_connect()
                if self.keepalive:
                    self._transport.set_keepalive(self.keepalive)
                self.connected.set()
        except GerritError:
            raise
        finally:
            self.lock.release()
python
[ "def", "_connect", "(", "self", ")", ":", "if", "not", "self", ".", "connected", ".", "is_set", "(", ")", ":", "try", ":", "self", ".", "lock", ".", "acquire", "(", ")", "# Another thread may have connected while we were", "# waiting to acquire the lock", "if", "not", "self", ".", "connected", ".", "is_set", "(", ")", ":", "self", ".", "_do_connect", "(", ")", "if", "self", ".", "keepalive", ":", "self", ".", "_transport", ".", "set_keepalive", "(", "self", ".", "keepalive", ")", "self", ".", "connected", ".", "set", "(", ")", "except", "GerritError", ":", "raise", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
Connect to the remote if not already connected.
[ "Connect", "to", "the", "remote", "if", "not", "already", "connected", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L134-L149
sonyxperiadev/pygerrit
pygerrit/ssh.py
GerritSSHClient.get_remote_version
def get_remote_version(self):
    """ Return the version of the remote Gerrit server. """
    if self.remote_version is None:
        result = self.run_gerrit_command("version")
        version_string = result.stdout.read()
        pattern = re.compile(r'^gerrit version (.*)$')
        self.remote_version = _extract_version(version_string, pattern)
    return self.remote_version
python
[ "def", "get_remote_version", "(", "self", ")", ":", "if", "self", ".", "remote_version", "is", "None", ":", "result", "=", "self", ".", "run_gerrit_command", "(", "\"version\"", ")", "version_string", "=", "result", ".", "stdout", ".", "read", "(", ")", "pattern", "=", "re", ".", "compile", "(", "r'^gerrit version (.*)$'", ")", "self", ".", "remote_version", "=", "_extract_version", "(", "version_string", ",", "pattern", ")", "return", "self", ".", "remote_version" ]
Return the version of the remote Gerrit server.
[ "Return", "the", "version", "of", "the", "remote", "Gerrit", "server", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L151-L158
sonyxperiadev/pygerrit
pygerrit/ssh.py
GerritSSHClient.run_gerrit_command
def run_gerrit_command(self, command):
    """ Run the given command.

    Make sure we're connected to the remote server, and run `command`.

    Return the results as a `GerritSSHCommandResult`.

    Raise `ValueError` if `command` is not a string, or `GerritError` if
    command execution fails.

    """
    if not isinstance(command, basestring):
        raise ValueError("command must be a string")
    gerrit_command = "gerrit " + command

    # are we sending non-ascii data?
    try:
        gerrit_command.encode('ascii')
    except UnicodeEncodeError:
        gerrit_command = gerrit_command.encode('utf-8')

    self._connect()
    try:
        stdin, stdout, stderr = self.exec_command(gerrit_command,
                                                  bufsize=1,
                                                  timeout=None,
                                                  get_pty=False)
    except SSHException as err:
        raise GerritError("Command execution error: %s" % err)
    return GerritSSHCommandResult(command, stdin, stdout, stderr)
python
[ "def", "run_gerrit_command", "(", "self", ",", "command", ")", ":", "if", "not", "isinstance", "(", "command", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"command must be a string\"", ")", "gerrit_command", "=", "\"gerrit \"", "+", "command", "# are we sending non-ascii data?", "try", ":", "gerrit_command", ".", "encode", "(", "'ascii'", ")", "except", "UnicodeEncodeError", ":", "gerrit_command", "=", "gerrit_command", ".", "encode", "(", "'utf-8'", ")", "self", ".", "_connect", "(", ")", "try", ":", "stdin", ",", "stdout", ",", "stderr", "=", "self", ".", "exec_command", "(", "gerrit_command", ",", "bufsize", "=", "1", ",", "timeout", "=", "None", ",", "get_pty", "=", "False", ")", "except", "SSHException", "as", "err", ":", "raise", "GerritError", "(", "\"Command execution error: %s\"", "%", "err", ")", "return", "GerritSSHCommandResult", "(", "command", ",", "stdin", ",", "stdout", ",", "stderr", ")" ]
Run the given command. Make sure we're connected to the remote server, and run `command`. Return the results as a `GerritSSHCommandResult`. Raise `ValueError` if `command` is not a string, or `GerritError` if command execution fails.
[ "Run", "the", "given", "command", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L165-L194
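Tying the SSH client pieces together; a sketch that assumes a reachable host configured explicitly or via ~/.ssh/config (as handled by _configure above):

from pygerrit.ssh import GerritSSHClient

client = GerritSSHClient("review.example.com")     # hypothetical host
result = client.run_gerrit_command("version")
print(result.stdout.read())                        # raw output of `gerrit version`
print(client.get_remote_version())                 # parsed version string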
sonyxperiadev/pygerrit
pygerrit/events.py
GerritEventFactory.register
def register(cls, name):
    """ Decorator to register the event identified by `name`.

    Return the decorated class.

    Raise GerritError if the event is already registered.

    """

    def decorate(klazz):
        """ Decorator. """
        if name in cls._events:
            raise GerritError("Duplicate event: %s" % name)
        cls._events[name] = [klazz.__module__, klazz.__name__]
        klazz.name = name
        return klazz

    return decorate
python
[ "def", "register", "(", "cls", ",", "name", ")", ":", "def", "decorate", "(", "klazz", ")", ":", "\"\"\" Decorator. \"\"\"", "if", "name", "in", "cls", ".", "_events", ":", "raise", "GerritError", "(", "\"Duplicate event: %s\"", "%", "name", ")", "cls", ".", "_events", "[", "name", "]", "=", "[", "klazz", ".", "__module__", ",", "klazz", ".", "__name__", "]", "klazz", ".", "name", "=", "name", "return", "klazz", "return", "decorate" ]
Decorator to register the event identified by `name`. Return the decorated class. Raise GerritError if the event is already registered.
[ "Decorator", "to", "register", "the", "event", "identified", "by", "name", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/events.py#L40-L56
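A sketch of how the register decorator might be applied; the event type name, class, and constructor body are hypothetical (the create method below instantiates the registered class as klazz(json_data)):

@GerritEventFactory.register("my-custom-event")    # hypothetical stream-event type
class MyCustomEvent(GerritEvent):
    """ Hypothetical handler for `my-custom-event`. """

    def __init__(self, json_data):
        self.json = json_data                      # keep the raw event payload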
sonyxperiadev/pygerrit
pygerrit/events.py
GerritEventFactory.create
def create(cls, data):
    """ Create a new event instance.

    Return an instance of the `GerritEvent` subclass after converting
    `data` to json.

    Raise GerritError if json parsed from `data` does not contain a `type`
    key.

    """
    try:
        json_data = json.loads(data)
    except ValueError as err:
        logging.debug("Failed to load json data: %s: [%s]", str(err), data)
        json_data = json.loads(ErrorEvent.error_json(err))

    if "type" not in json_data:
        raise GerritError("`type` not in json_data")
    name = json_data["type"]
    if name not in cls._events:
        name = 'unhandled-event'
    event = cls._events[name]
    module_name = event[0]
    class_name = event[1]
    module = __import__(module_name, fromlist=[module_name])
    klazz = getattr(module, class_name)
    return klazz(json_data)
python
[ "def", "create", "(", "cls", ",", "data", ")", ":", "try", ":", "json_data", "=", "json", ".", "loads", "(", "data", ")", "except", "ValueError", "as", "err", ":", "logging", ".", "debug", "(", "\"Failed to load json data: %s: [%s]\"", ",", "str", "(", "err", ")", ",", "data", ")", "json_data", "=", "json", ".", "loads", "(", "ErrorEvent", ".", "error_json", "(", "err", ")", ")", "if", "\"type\"", "not", "in", "json_data", ":", "raise", "GerritError", "(", "\"`type` not in json_data\"", ")", "name", "=", "json_data", "[", "\"type\"", "]", "if", "name", "not", "in", "cls", ".", "_events", ":", "name", "=", "'unhandled-event'", "event", "=", "cls", ".", "_events", "[", "name", "]", "module_name", "=", "event", "[", "0", "]", "class_name", "=", "event", "[", "1", "]", "module", "=", "__import__", "(", "module_name", ",", "fromlist", "=", "[", "module_name", "]", ")", "klazz", "=", "getattr", "(", "module", ",", "class_name", ")", "return", "klazz", "(", "json_data", ")" ]
Create a new event instance. Return an instance of the `GerritEvent` subclass after converting `data` to json. Raise GerritError if json parsed from `data` does not contain a `type` key.
[ "Create", "a", "new", "event", "instance", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/events.py#L59-L85
sonyxperiadev/pygerrit
pygerrit/rest/__init__.py
_decode_response
def _decode_response(response):
    """ Strip off Gerrit's magic prefix and decode a response.

    :returns:
        Decoded JSON content as a dict, or raw text if content could not be
        decoded as JSON.

    :raises:
        requests.HTTPError if the response contains an HTTP error status
        code.

    """
    content = response.content.strip()
    logging.debug(content[:512])
    response.raise_for_status()
    if content.startswith(GERRIT_MAGIC_JSON_PREFIX):
        content = content[len(GERRIT_MAGIC_JSON_PREFIX):]
    try:
        return json.loads(content)
    except ValueError:
        logging.error('Invalid json content: %s' % content)
        raise
python
[ "def", "_decode_response", "(", "response", ")", ":", "content", "=", "response", ".", "content", ".", "strip", "(", ")", "logging", ".", "debug", "(", "content", "[", ":", "512", "]", ")", "response", ".", "raise_for_status", "(", ")", "if", "content", ".", "startswith", "(", "GERRIT_MAGIC_JSON_PREFIX", ")", ":", "content", "=", "content", "[", "len", "(", "GERRIT_MAGIC_JSON_PREFIX", ")", ":", "]", "try", ":", "return", "json", ".", "loads", "(", "content", ")", "except", "ValueError", ":", "logging", ".", "error", "(", "'Invalid json content: %s'", "%", "content", ")", "raise" ]
Strip off Gerrit's magic prefix and decode a response. :returns: Decoded JSON content as a dict, or raw text if content could not be decoded as JSON. :raises: requests.HTTPError if the response contains an HTTP error status code.
[ "Strip", "off", "Gerrit", "s", "magic", "prefix", "and", "decode", "a", "response", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L33-L53
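Gerrit prefixes its JSON REST responses with the anti-XSSI marker )]}' , which is what GERRIT_MAGIC_JSON_PREFIX strips above; a self-contained sketch of the same idea:

import json

MAGIC_PREFIX = ")]}'"                              # Gerrit's anti-XSSI prefix (illustrative constant name)

def decode_body(raw):
    body = raw.strip()
    if body.startswith(MAGIC_PREFIX):
        body = body[len(MAGIC_PREFIX):]
    return json.loads(body)

print(decode_body(")]}'\n{\"_number\": 12345}"))   # -> {'_number': 12345}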
sonyxperiadev/pygerrit
pygerrit/rest/__init__.py
GerritRestAPI.put
def put(self, endpoint, **kwargs):
    """ Send HTTP PUT to the endpoint.

    :arg str endpoint: The endpoint to send to.

    :returns:
        JSON decoded result.

    :raises:
        requests.RequestException on timeout or connection error.

    """
    kwargs.update(self.kwargs.copy())
    if "data" in kwargs:
        kwargs["headers"].update(
            {"Content-Type": "application/json;charset=UTF-8"})
    response = requests.put(self.make_url(endpoint), **kwargs)
    return _decode_response(response)
python
[ "def", "put", "(", "self", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "self", ".", "kwargs", ".", "copy", "(", ")", ")", "if", "\"data\"", "in", "kwargs", ":", "kwargs", "[", "\"headers\"", "]", ".", "update", "(", "{", "\"Content-Type\"", ":", "\"application/json;charset=UTF-8\"", "}", ")", "response", "=", "requests", ".", "put", "(", "self", ".", "make_url", "(", "endpoint", ")", ",", "*", "*", "kwargs", ")", "return", "_decode_response", "(", "response", ")" ]
Send HTTP PUT to the endpoint. :arg str endpoint: The endpoint to send to. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error.
[ "Send", "HTTP", "PUT", "to", "the", "endpoint", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L121-L138
sonyxperiadev/pygerrit
pygerrit/rest/__init__.py
GerritRestAPI.delete
def delete(self, endpoint, **kwargs):
    """ Send HTTP DELETE to the endpoint.

    :arg str endpoint: The endpoint to send to.

    :returns:
        JSON decoded result.

    :raises:
        requests.RequestException on timeout or connection error.

    """
    kwargs.update(self.kwargs.copy())
    response = requests.delete(self.make_url(endpoint), **kwargs)
    return _decode_response(response)
python
[ "def", "delete", "(", "self", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "self", ".", "kwargs", ".", "copy", "(", ")", ")", "response", "=", "requests", ".", "delete", "(", "self", ".", "make_url", "(", "endpoint", ")", ",", "*", "*", "kwargs", ")", "return", "_decode_response", "(", "response", ")" ]
Send HTTP DELETE to the endpoint. :arg str endpoint: The endpoint to send to. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error.
[ "Send", "HTTP", "DELETE", "to", "the", "endpoint", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L159-L173
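A usage sketch for the REST helpers above; the base URL, change number, endpoints and constructor keyword are assumptions:

import json
from pygerrit.rest import GerritRestAPI

rest = GerritRestAPI(url="https://review.example.com")                    # hypothetical Gerrit base URL
rest.put("changes/12345/topic", data=json.dumps({"topic": "cleanup"}))    # set a change topic
rest.delete("changes/12345/topic")                                        # and remove it again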
sonyxperiadev/pygerrit
pygerrit/rest/__init__.py
GerritRestAPI.review
def review(self, change_id, revision, review):
    """ Submit a review.

    :arg str change_id: The change ID.
    :arg str revision: The revision.
    :arg str review: The review details as a :class:`GerritReview`.

    :returns:
        JSON decoded result.

    :raises:
        requests.RequestException on timeout or connection error.

    """
    endpoint = "changes/%s/revisions/%s/review" % (change_id, revision)
    self.post(endpoint, data=str(review))
python
[ "def", "review", "(", "self", ",", "change_id", ",", "revision", ",", "review", ")", ":", "endpoint", "=", "\"changes/%s/revisions/%s/review\"", "%", "(", "change_id", ",", "revision", ")", "self", ".", "post", "(", "endpoint", ",", "data", "=", "str", "(", "review", ")", ")" ]
Submit a review. :arg str change_id: The change ID. :arg str revision: The revision. :arg str review: The review details as a :class:`GerritReview`. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error.
[ "Submit", "a", "review", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L175-L191
sonyxperiadev/pygerrit
pygerrit/rest/__init__.py
GerritReview.add_comments
def add_comments(self, comments):
    """ Add inline comments.

    :arg dict comments: Comments to add.

    Usage::

        add_comments([{'filename': 'Makefile',
                       'line': 10,
                       'message': 'inline message'}])

        add_comments([{'filename': 'Makefile',
                       'range': {'start_line': 0,
                                 'start_character': 1,
                                 'end_line': 0,
                                 'end_character': 5},
                       'message': 'inline message'}])

    """
    for comment in comments:
        if 'filename' and 'message' in comment.keys():
            msg = {}
            if 'range' in comment.keys():
                msg = {"range": comment['range'],
                       "message": comment['message']}
            elif 'line' in comment.keys():
                msg = {"line": comment['line'],
                       "message": comment['message']}
            else:
                continue
            file_comment = {comment['filename']: [msg]}
            if self.comments:
                if comment['filename'] in self.comments.keys():
                    self.comments[comment['filename']].append(msg)
                else:
                    self.comments.update(file_comment)
            else:
                self.comments.update(file_comment)
python
[ "def", "add_comments", "(", "self", ",", "comments", ")", ":", "for", "comment", "in", "comments", ":", "if", "'filename'", "and", "'message'", "in", "comment", ".", "keys", "(", ")", ":", "msg", "=", "{", "}", "if", "'range'", "in", "comment", ".", "keys", "(", ")", ":", "msg", "=", "{", "\"range\"", ":", "comment", "[", "'range'", "]", ",", "\"message\"", ":", "comment", "[", "'message'", "]", "}", "elif", "'line'", "in", "comment", ".", "keys", "(", ")", ":", "msg", "=", "{", "\"line\"", ":", "comment", "[", "'line'", "]", ",", "\"message\"", ":", "comment", "[", "'message'", "]", "}", "else", ":", "continue", "file_comment", "=", "{", "comment", "[", "'filename'", "]", ":", "[", "msg", "]", "}", "if", "self", ".", "comments", ":", "if", "comment", "[", "'filename'", "]", "in", "self", ".", "comments", ".", "keys", "(", ")", ":", "self", ".", "comments", "[", "comment", "[", "'filename'", "]", "]", ".", "append", "(", "msg", ")", "else", ":", "self", ".", "comments", ".", "update", "(", "file_comment", ")", "else", ":", "self", ".", "comments", ".", "update", "(", "file_comment", ")" ]
Add inline comments. :arg dict comments: Comments to add. Usage:: add_comments([{'filename': 'Makefile', 'line': 10, 'message': 'inline message'}]) add_comments([{'filename': 'Makefile', 'range': {'start_line': 0, 'start_character': 1, 'end_line': 0, 'end_character': 5}, 'message': 'inline message'}])
[ "Add", "inline", "comments", "." ]
train
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/rest/__init__.py#L241-L278
SiriDB/siridb-connector
siridb/connector/lib/protocol.py
_SiriDBProtocol.connection_made
def connection_made(self, transport):
    '''
    override asyncio.Protocol
    '''
    self._connected = True
    self.transport = transport
    self.remote_ip, self.port = transport.get_extra_info('peername')[:2]

    logging.debug(
        'Connection made (address: {} port: {})'
        .format(self.remote_ip, self.port))

    self.auth_future = self.send_package(protomap.CPROTO_REQ_AUTH,
                                         data=(self._username,
                                               self._password,
                                               self._dbname),
                                         timeout=10)
    self._password = None
    self.on_connection_made()
python
[ "def", "connection_made", "(", "self", ",", "transport", ")", ":", "self", ".", "_connected", "=", "True", "self", ".", "transport", "=", "transport", "self", ".", "remote_ip", ",", "self", ".", "port", "=", "transport", ".", "get_extra_info", "(", "'peername'", ")", "[", ":", "2", "]", "logging", ".", "debug", "(", "'Connection made (address: {} port: {})'", ".", "format", "(", "self", ".", "remote_ip", ",", "self", ".", "port", ")", ")", "self", ".", "auth_future", "=", "self", ".", "send_package", "(", "protomap", ".", "CPROTO_REQ_AUTH", ",", "data", "=", "(", "self", ".", "_username", ",", "self", ".", "_password", ",", "self", ".", "_dbname", ")", ",", "timeout", "=", "10", ")", "self", ".", "_password", "=", "None", "self", ".", "on_connection_made", "(", ")" ]
override asyncio.Protocol
[ "override", "asyncio", ".", "Protocol" ]
train
https://github.com/SiriDB/siridb-connector/blob/dff33c183899c3ee21c3eab6b90cf4668afef1b0/siridb/connector/lib/protocol.py#L85-L106
SiriDB/siridb-connector
siridb/connector/lib/protocol.py
_SiriDBProtocol.connection_lost
def connection_lost(self, exc):
    '''
    override asyncio.Protocol
    '''
    self._connected = False
    logging.debug(
        'Connection lost (address: {} port: {})'
        .format(self.remote_ip, self.port))

    for pid, (future, task) in self._requests.items():
        task.cancel()
        if future.cancelled():
            continue
        future.set_exception(ConnectionError(
            'Connection is lost before we had an answer on package id: {}.'
            .format(pid)))

    self.on_connection_lost(exc)
python
[ "def", "connection_lost", "(", "self", ",", "exc", ")", ":", "self", ".", "_connected", "=", "False", "logging", ".", "debug", "(", "'Connection lost (address: {} port: {})'", ".", "format", "(", "self", ".", "remote_ip", ",", "self", ".", "port", ")", ")", "for", "pid", ",", "(", "future", ",", "task", ")", "in", "self", ".", "_requests", ".", "items", "(", ")", ":", "task", ".", "cancel", "(", ")", "if", "future", ".", "cancelled", "(", ")", ":", "continue", "future", ".", "set_exception", "(", "ConnectionError", "(", "'Connection is lost before we had an answer on package id: {}.'", ".", "format", "(", "pid", ")", ")", ")", "self", ".", "on_connection_lost", "(", "exc", ")" ]
override asyncio.Protocol
[ "override", "asyncio", ".", "Protocol" ]
train
https://github.com/SiriDB/siridb-connector/blob/dff33c183899c3ee21c3eab6b90cf4668afef1b0/siridb/connector/lib/protocol.py#L108-L126
SiriDB/siridb-connector
siridb/connector/lib/protocol.py
_SiriDBProtocol.data_received
def data_received(self, data):
    '''
    override asyncio.Protocol
    '''
    self._buffered_data.extend(data)
    while self._buffered_data:
        size = len(self._buffered_data)
        if self._data_package is None:
            if size < DataPackage.struct_datapackage.size:
                return None
            self._data_package = DataPackage(self._buffered_data)

        if size < self._data_package.length:
            return None

        try:
            self._data_package.extract_data_from(self._buffered_data)
        except KeyError as e:
            logging.error('Unsupported package received: {}'.format(e))
        except Exception as e:
            logging.exception(e)
            # empty the byte-array to recover from this error
            self._buffered_data.clear()
        else:
            self._on_package_received()
        self._data_package = None
python
[ "def", "data_received", "(", "self", ",", "data", ")", ":", "self", ".", "_buffered_data", ".", "extend", "(", "data", ")", "while", "self", ".", "_buffered_data", ":", "size", "=", "len", "(", "self", ".", "_buffered_data", ")", "if", "self", ".", "_data_package", "is", "None", ":", "if", "size", "<", "DataPackage", ".", "struct_datapackage", ".", "size", ":", "return", "None", "self", ".", "_data_package", "=", "DataPackage", "(", "self", ".", "_buffered_data", ")", "if", "size", "<", "self", ".", "_data_package", ".", "length", ":", "return", "None", "try", ":", "self", ".", "_data_package", ".", "extract_data_from", "(", "self", ".", "_buffered_data", ")", "except", "KeyError", "as", "e", ":", "logging", ".", "error", "(", "'Unsupported package received: {}'", ".", "format", "(", "e", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "e", ")", "# empty the byte-array to recover from this error", "self", ".", "_buffered_data", ".", "clear", "(", ")", "else", ":", "self", ".", "_on_package_received", "(", ")", "self", ".", "_data_package", "=", "None" ]
override asyncio.Protocol
[ "override", "asyncio", ".", "Protocol" ]
train
https://github.com/SiriDB/siridb-connector/blob/dff33c183899c3ee21c3eab6b90cf4668afef1b0/siridb/connector/lib/protocol.py#L128-L151
SiriDB/siridb-connector
siridb/connector/lib/protocol.py
_SiriDBInfoProtocol.connection_made
def connection_made(self, transport):
    '''
    override _SiriDBProtocol
    '''
    self.transport = transport
    self.remote_ip, self.port = transport.get_extra_info('peername')[:2]

    logging.debug(
        'Connection made (address: {} port: {})'
        .format(self.remote_ip, self.port))

    self.future = self.send_package(
        protomap.CPROTO_REQ_INFO,
        data=None,
        timeout=10)
python
[ "def", "connection_made", "(", "self", ",", "transport", ")", ":", "self", ".", "transport", "=", "transport", "self", ".", "remote_ip", ",", "self", ".", "port", "=", "transport", ".", "get_extra_info", "(", "'peername'", ")", "[", ":", "2", "]", "logging", ".", "debug", "(", "'Connection made (address: {} port: {})'", ".", "format", "(", "self", ".", "remote_ip", ",", "self", ".", "port", ")", ")", "self", ".", "future", "=", "self", ".", "send_package", "(", "protomap", ".", "CPROTO_REQ_INFO", ",", "data", "=", "None", ",", "timeout", "=", "10", ")" ]
override _SiriDBProtocol
[ "override", "_SiriDBProtocol" ]
train
https://github.com/SiriDB/siridb-connector/blob/dff33c183899c3ee21c3eab6b90cf4668afef1b0/siridb/connector/lib/protocol.py#L234-L249
SiriDB/siridb-connector
siridb/connector/lib/connection.py
SiriDBConnection._register_server
def _register_server(self, server, timeout=30):
    '''Register a new SiriDB Server.

    This method is used by the SiriDB manage tool and should not be used
    otherwise. Full access rights are required for this request.
    '''
    result = self._loop.run_until_complete(
        self._protocol.send_package(CPROTO_REQ_REGISTER_SERVER,
                                    data=server,
                                    timeout=timeout))
    return result
python
[ "def", "_register_server", "(", "self", ",", "server", ",", "timeout", "=", "30", ")", ":", "result", "=", "self", ".", "_loop", ".", "run_until_complete", "(", "self", ".", "_protocol", ".", "send_package", "(", "CPROTO_REQ_REGISTER_SERVER", ",", "data", "=", "server", ",", "timeout", "=", "timeout", ")", ")", "return", "result" ]
Register a new SiriDB Server. This method is used by the SiriDB manage tool and should not be used otherwise. Full access rights are required for this request.
[ "Register", "a", "new", "SiriDB", "Server", "." ]
train
https://github.com/SiriDB/siridb-connector/blob/dff33c183899c3ee21c3eab6b90cf4668afef1b0/siridb/connector/lib/connection.py#L65-L75
SiriDB/siridb-connector
siridb/connector/lib/connection.py
SiriDBConnection._get_file
def _get_file(self, fn, timeout=30):
    '''Request a SiriDB configuration file.

    This method is used by the SiriDB manage tool and should not be used
    otherwise. Full access rights are required for this request.
    '''
    msg = FILE_MAP.get(fn, None)
    if msg is None:
        raise FileNotFoundError('Cannot get file {!r}. Available file '
                                'requests are: {}'
                                .format(fn, ', '.join(FILE_MAP.keys())))
    result = self._loop.run_until_complete(
        self._protocol.send_package(msg, timeout=timeout))
    return result
python
[ "def", "_get_file", "(", "self", ",", "fn", ",", "timeout", "=", "30", ")", ":", "msg", "=", "FILE_MAP", ".", "get", "(", "fn", ",", "None", ")", "if", "msg", "is", "None", ":", "raise", "FileNotFoundError", "(", "'Cannot get file {!r}. Available file '", "'requests are: {}'", ".", "format", "(", "fn", ",", "', '", ".", "join", "(", "FILE_MAP", ".", "keys", "(", ")", ")", ")", ")", "result", "=", "self", ".", "_loop", ".", "run_until_complete", "(", "self", ".", "_protocol", ".", "send_package", "(", "msg", ",", "timeout", "=", "timeout", ")", ")", "return", "result" ]
Request a SiriDB configuration file. This method is used by the SiriDB manage tool and should not be used otherwise. Full access rights are required for this request.
[ "Request", "a", "SiriDB", "configuration", "file", "." ]
train
https://github.com/SiriDB/siridb-connector/blob/dff33c183899c3ee21c3eab6b90cf4668afef1b0/siridb/connector/lib/connection.py#L77-L90
transitland/mapzen-geohash
mzgeohash/geohash.py
_bits_to_float
def _bits_to_float(bits, lower=-90.0, middle=0.0, upper=90.0):
    """Convert GeoHash bits to a float."""
    for i in bits:
        if i:
            lower = middle
        else:
            upper = middle
        middle = (upper + lower) / 2
    return middle
python
[ "def", "_bits_to_float", "(", "bits", ",", "lower", "=", "-", "90.0", ",", "middle", "=", "0.0", ",", "upper", "=", "90.0", ")", ":", "for", "i", "in", "bits", ":", "if", "i", ":", "lower", "=", "middle", "else", ":", "upper", "=", "middle", "middle", "=", "(", "upper", "+", "lower", ")", "/", "2", "return", "middle" ]
Convert GeoHash bits to a float.
[ "Convert", "GeoHash", "bits", "to", "a", "float", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L8-L16
transitland/mapzen-geohash
mzgeohash/geohash.py
_float_to_bits
def _float_to_bits(value, lower=-90.0, middle=0.0, upper=90.0, length=15):
    """Convert a float to a list of GeoHash bits."""
    ret = []
    for i in range(length):
        if value >= middle:
            lower = middle
            ret.append(1)
        else:
            upper = middle
            ret.append(0)
        middle = (upper + lower) / 2
    return ret
python
[ "def", "_float_to_bits", "(", "value", ",", "lower", "=", "-", "90.0", ",", "middle", "=", "0.0", ",", "upper", "=", "90.0", ",", "length", "=", "15", ")", ":", "ret", "=", "[", "]", "for", "i", "in", "range", "(", "length", ")", ":", "if", "value", ">=", "middle", ":", "lower", "=", "middle", "ret", ".", "append", "(", "1", ")", "else", ":", "upper", "=", "middle", "ret", ".", "append", "(", "0", ")", "middle", "=", "(", "upper", "+", "lower", ")", "/", "2", "return", "ret" ]
Convert a float to a list of GeoHash bits.
[ "Convert", "a", "float", "to", "a", "list", "of", "GeoHash", "bits", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L18-L29
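_float_to_bits is the inverse quantization, so the two helpers roundtrip up to the precision set by length; a quick check under the same import assumption:

    from mzgeohash.geohash import _bits_to_float, _float_to_bits

    bits = _float_to_bits(42.605, lower=-90.0, upper=90.0, length=20)
    print(len(bits))             # 20 quantization decisions
    print(_bits_to_float(bits))  # ~42.605, within 90 / 2**20 degrees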
transitland/mapzen-geohash
mzgeohash/geohash.py
_geohash_to_bits
def _geohash_to_bits(value): """Convert a GeoHash to a list of GeoHash bits.""" b = map(BASE32MAP.get, value) ret = [] for i in b: out = [] for z in range(5): out.append(i & 0b1) i = i >> 1 ret += out[::-1] return ret
python
def _geohash_to_bits(value): """Convert a GeoHash to a list of GeoHash bits.""" b = map(BASE32MAP.get, value) ret = [] for i in b: out = [] for z in range(5): out.append(i & 0b1) i = i >> 1 ret += out[::-1] return ret
[ "def", "_geohash_to_bits", "(", "value", ")", ":", "b", "=", "map", "(", "BASE32MAP", ".", "get", ",", "value", ")", "ret", "=", "[", "]", "for", "i", "in", "b", ":", "out", "=", "[", "]", "for", "z", "in", "range", "(", "5", ")", ":", "out", ".", "append", "(", "i", "&", "0b1", ")", "i", "=", "i", ">>", "1", "ret", "+=", "out", "[", ":", ":", "-", "1", "]", "return", "ret" ]
Convert a GeoHash to a list of GeoHash bits.
[ "Convert", "a", "GeoHash", "to", "a", "list", "of", "GeoHash", "bits", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L31-L41
transitland/mapzen-geohash
mzgeohash/geohash.py
_bits_to_geohash
def _bits_to_geohash(value): """Convert a list of GeoHash bits to a GeoHash.""" ret = [] # Get 5 bits at a time for i in (value[i:i+5] for i in xrange(0, len(value), 5)): # Convert binary to integer # Note: reverse here, the slice above doesn't work quite right in reverse. total = sum([(bit*2**count) for count,bit in enumerate(i[::-1])]) ret.append(BASE32MAPR[total]) # Join the string and return return "".join(ret)
python
def _bits_to_geohash(value): """Convert a list of GeoHash bits to a GeoHash.""" ret = [] # Get 5 bits at a time for i in (value[i:i+5] for i in xrange(0, len(value), 5)): # Convert binary to integer # Note: reverse here, the slice above doesn't work quite right in reverse. total = sum([(bit*2**count) for count,bit in enumerate(i[::-1])]) ret.append(BASE32MAPR[total]) # Join the string and return return "".join(ret)
[ "def", "_bits_to_geohash", "(", "value", ")", ":", "ret", "=", "[", "]", "# Get 5 bits at a time", "for", "i", "in", "(", "value", "[", "i", ":", "i", "+", "5", "]", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "value", ")", ",", "5", ")", ")", ":", "# Convert binary to integer", "# Note: reverse here, the slice above doesn't work quite right in reverse.", "total", "=", "sum", "(", "[", "(", "bit", "*", "2", "**", "count", ")", "for", "count", ",", "bit", "in", "enumerate", "(", "i", "[", ":", ":", "-", "1", "]", ")", "]", ")", "ret", ".", "append", "(", "BASE32MAPR", "[", "total", "]", ")", "# Join the string and return", "return", "\"\"", ".", "join", "(", "ret", ")" ]
Convert a list of GeoHash bits to a GeoHash.
[ "Convert", "a", "list", "of", "GeoHash", "bits", "to", "a", "GeoHash", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L43-L53
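_geohash_to_bits and _bits_to_geohash are mutually inverse base-32 conversions, five bits per character. Note _bits_to_geohash uses xrange, so the module as written targets Python 2; a roundtrip sketch under that assumption:

    from mzgeohash.geohash import _bits_to_geohash, _geohash_to_bits

    bits = _geohash_to_bits('ezs42')
    print(len(bits))               # 25 bits for 5 base-32 characters
    print(_bits_to_geohash(bits))  # 'ezs42' again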
transitland/mapzen-geohash
mzgeohash/geohash.py
decode
def decode(value): """Decode a geohash. Returns a (lon,lat) pair.""" assert value, "Invalid geohash: %s"%value # Get the GeoHash bits bits = _geohash_to_bits(value) # Unzip the GeoHash bits. lon = bits[0::2] lat = bits[1::2] # Convert to lat/lon return ( _bits_to_float(lon, lower=-180.0, upper=180.0), _bits_to_float(lat) )
python
def decode(value): """Decode a geohash. Returns a (lon,lat) pair.""" assert value, "Invalid geohash: %s"%value # Get the GeoHash bits bits = _geohash_to_bits(value) # Unzip the GeoHash bits. lon = bits[0::2] lat = bits[1::2] # Convert to lat/lon return ( _bits_to_float(lon, lower=-180.0, upper=180.0), _bits_to_float(lat) )
[ "def", "decode", "(", "value", ")", ":", "assert", "value", ",", "\"Invalid geohash: %s\"", "%", "value", "# Get the GeoHash bits", "bits", "=", "_geohash_to_bits", "(", "value", ")", "# Unzip the GeoHash bits.", "lon", "=", "bits", "[", "0", ":", ":", "2", "]", "lat", "=", "bits", "[", "1", ":", ":", "2", "]", "# Convert to lat/lon", "return", "(", "_bits_to_float", "(", "lon", ",", "lower", "=", "-", "180.0", ",", "upper", "=", "180.0", ")", ",", "_bits_to_float", "(", "lat", ")", ")" ]
Decode a geohash. Returns a (lon,lat) pair.
[ "Decode", "a", "geohash", ".", "Returns", "a", "(", "lon", "lat", ")", "pair", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L56-L68
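Typical use of decode; note the return order is (lon, lat), not (lat, lon). Using the classic test hash 'ezs42' (roughly 42.6 N, 5.6 W) and assuming the package re-exports decode at the top level:

    from mzgeohash import decode

    lon, lat = decode('ezs42')
    print(lon, lat)   # approximately -5.60, 42.60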
transitland/mapzen-geohash
mzgeohash/geohash.py
encode
def encode(lonlat, length=12): """Encode a (lon,lat) pair to a GeoHash.""" assert len(lonlat) == 2, "Invalid lon/lat: %s"%lonlat # Half the length for each component. length /= 2 lon = _float_to_bits(lonlat[0], lower=-180.0, upper=180.0, length=length*5) lat = _float_to_bits(lonlat[1], lower=-90.0, upper=90.0, length=length*5) # Zip the GeoHash bits. ret = [] for a,b in zip(lon,lat): ret.append(a) ret.append(b) return _bits_to_geohash(ret)
python
def encode(lonlat, length=12): """Encode a (lon,lat) pair to a GeoHash.""" assert len(lonlat) == 2, "Invalid lon/lat: %s"%lonlat # Half the length for each component. length /= 2 lon = _float_to_bits(lonlat[0], lower=-180.0, upper=180.0, length=length*5) lat = _float_to_bits(lonlat[1], lower=-90.0, upper=90.0, length=length*5) # Zip the GeoHash bits. ret = [] for a,b in zip(lon,lat): ret.append(a) ret.append(b) return _bits_to_geohash(ret)
[ "def", "encode", "(", "lonlat", ",", "length", "=", "12", ")", ":", "assert", "len", "(", "lonlat", ")", "==", "2", ",", "\"Invalid lon/lat: %s\"", "%", "lonlat", "# Half the length for each component.", "length", "/=", "2", "lon", "=", "_float_to_bits", "(", "lonlat", "[", "0", "]", ",", "lower", "=", "-", "180.0", ",", "upper", "=", "180.0", ",", "length", "=", "length", "*", "5", ")", "lat", "=", "_float_to_bits", "(", "lonlat", "[", "1", "]", ",", "lower", "=", "-", "90.0", ",", "upper", "=", "90.0", ",", "length", "=", "length", "*", "5", ")", "# Zip the GeoHash bits.", "ret", "=", "[", "]", "for", "a", ",", "b", "in", "zip", "(", "lon", ",", "lat", ")", ":", "ret", ".", "append", "(", "a", ")", "ret", ".", "append", "(", "b", ")", "return", "_bits_to_geohash", "(", "ret", ")" ]
Encode a (lon,lat) pair to a GeoHash.
[ "Encode", "a", "(", "lon", "lat", ")", "pair", "to", "a", "GeoHash", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L70-L82
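encode splits the requested length between the two axes (length /= 2), which relies on Python 2 integer division for odd lengths; a minimal roundtrip sketch under the same re-export assumption:

    from mzgeohash import decode, encode

    gh = encode((-5.603, 42.605), length=10)   # longitude first, then latitude
    print(gh)          # a 10-character geohash
    print(decode(gh))  # close to (-5.603, 42.605)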
transitland/mapzen-geohash
mzgeohash/geohash.py
adjacent
def adjacent(geohash, direction): """Return the adjacent geohash for a given direction.""" # Based on an MIT licensed implementation by Chris Veness from: # http://www.movable-type.co.uk/scripts/geohash.html assert direction in 'nsew', "Invalid direction: %s"%direction assert geohash, "Invalid geohash: %s"%geohash neighbor = { 'n': [ 'p0r21436x8zb9dcf5h7kjnmqesgutwvy', 'bc01fg45238967deuvhjyznpkmstqrwx' ], 's': [ '14365h7k9dcfesgujnmqp0r2twvyx8zb', '238967debc01fg45kmstqrwxuvhjyznp' ], 'e': [ 'bc01fg45238967deuvhjyznpkmstqrwx', 'p0r21436x8zb9dcf5h7kjnmqesgutwvy' ], 'w': [ '238967debc01fg45kmstqrwxuvhjyznp', '14365h7k9dcfesgujnmqp0r2twvyx8zb' ] } border = { 'n': [ 'prxz', 'bcfguvyz' ], 's': [ '028b', '0145hjnp' ], 'e': [ 'bcfguvyz', 'prxz' ], 'w': [ '0145hjnp', '028b' ] } last = geohash[-1] parent = geohash[0:-1] t = len(geohash) % 2 # Check for edge cases if (last in border[direction][t]) and (parent): parent = adjacent(parent, direction) return parent + BASESEQUENCE[neighbor[direction][t].index(last)]
python
def adjacent(geohash, direction): """Return the adjacent geohash for a given direction.""" # Based on an MIT licensed implementation by Chris Veness from: # http://www.movable-type.co.uk/scripts/geohash.html assert direction in 'nsew', "Invalid direction: %s"%direction assert geohash, "Invalid geohash: %s"%geohash neighbor = { 'n': [ 'p0r21436x8zb9dcf5h7kjnmqesgutwvy', 'bc01fg45238967deuvhjyznpkmstqrwx' ], 's': [ '14365h7k9dcfesgujnmqp0r2twvyx8zb', '238967debc01fg45kmstqrwxuvhjyznp' ], 'e': [ 'bc01fg45238967deuvhjyznpkmstqrwx', 'p0r21436x8zb9dcf5h7kjnmqesgutwvy' ], 'w': [ '238967debc01fg45kmstqrwxuvhjyznp', '14365h7k9dcfesgujnmqp0r2twvyx8zb' ] } border = { 'n': [ 'prxz', 'bcfguvyz' ], 's': [ '028b', '0145hjnp' ], 'e': [ 'bcfguvyz', 'prxz' ], 'w': [ '0145hjnp', '028b' ] } last = geohash[-1] parent = geohash[0:-1] t = len(geohash) % 2 # Check for edge cases if (last in border[direction][t]) and (parent): parent = adjacent(parent, direction) return parent + BASESEQUENCE[neighbor[direction][t].index(last)]
[ "def", "adjacent", "(", "geohash", ",", "direction", ")", ":", "# Based on an MIT licensed implementation by Chris Veness from:", "# http://www.movable-type.co.uk/scripts/geohash.html", "assert", "direction", "in", "'nsew'", ",", "\"Invalid direction: %s\"", "%", "direction", "assert", "geohash", ",", "\"Invalid geohash: %s\"", "%", "geohash", "neighbor", "=", "{", "'n'", ":", "[", "'p0r21436x8zb9dcf5h7kjnmqesgutwvy'", ",", "'bc01fg45238967deuvhjyznpkmstqrwx'", "]", ",", "'s'", ":", "[", "'14365h7k9dcfesgujnmqp0r2twvyx8zb'", ",", "'238967debc01fg45kmstqrwxuvhjyznp'", "]", ",", "'e'", ":", "[", "'bc01fg45238967deuvhjyznpkmstqrwx'", ",", "'p0r21436x8zb9dcf5h7kjnmqesgutwvy'", "]", ",", "'w'", ":", "[", "'238967debc01fg45kmstqrwxuvhjyznp'", ",", "'14365h7k9dcfesgujnmqp0r2twvyx8zb'", "]", "}", "border", "=", "{", "'n'", ":", "[", "'prxz'", ",", "'bcfguvyz'", "]", ",", "'s'", ":", "[", "'028b'", ",", "'0145hjnp'", "]", ",", "'e'", ":", "[", "'bcfguvyz'", ",", "'prxz'", "]", ",", "'w'", ":", "[", "'0145hjnp'", ",", "'028b'", "]", "}", "last", "=", "geohash", "[", "-", "1", "]", "parent", "=", "geohash", "[", "0", ":", "-", "1", "]", "t", "=", "len", "(", "geohash", ")", "%", "2", "# Check for edge cases", "if", "(", "last", "in", "border", "[", "direction", "]", "[", "t", "]", ")", "and", "(", "parent", ")", ":", "parent", "=", "adjacent", "(", "parent", ",", "direction", ")", "return", "parent", "+", "BASESEQUENCE", "[", "neighbor", "[", "direction", "]", "[", "t", "]", ".", "index", "(", "last", ")", "]" ]
Return the adjacent geohash for a given direction.
[ "Return", "the", "adjacent", "geohash", "for", "a", "given", "direction", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L84-L108
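adjacent steps one cell in a compass direction; when the last character lies on a border for that direction it recurses on the parent prefix, so crossing a coarse cell boundary changes more than the final character. A small sketch, with no specific outputs asserted since they depend on the lookup tables above:

    from mzgeohash import adjacent

    cell = 'ezs42'
    print(adjacent(cell, 'n'))   # northern neighbour, same length
    print(adjacent(cell, 'e'))   # eastern neighbour
    # repeated steps in one direction eventually hit the border case and
    # trigger the recursion into the parent geohash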
transitland/mapzen-geohash
mzgeohash/geohash.py
neighbors
def neighbors(geohash): """Return all neighboring geohashes.""" return { 'n': adjacent(geohash, 'n'), 'ne': adjacent(adjacent(geohash, 'n'), 'e'), 'e': adjacent(geohash, 'e'), 'se': adjacent(adjacent(geohash, 's'), 'e'), 's': adjacent(geohash, 's'), 'sw': adjacent(adjacent(geohash, 's'), 'w'), 'w': adjacent(geohash, 'w'), 'nw': adjacent(adjacent(geohash, 'n'), 'w'), 'c': geohash }
python
def neighbors(geohash): """Return all neighboring geohashes.""" return { 'n': adjacent(geohash, 'n'), 'ne': adjacent(adjacent(geohash, 'n'), 'e'), 'e': adjacent(geohash, 'e'), 'se': adjacent(adjacent(geohash, 's'), 'e'), 's': adjacent(geohash, 's'), 'sw': adjacent(adjacent(geohash, 's'), 'w'), 'w': adjacent(geohash, 'w'), 'nw': adjacent(adjacent(geohash, 'n'), 'w'), 'c': geohash }
[ "def", "neighbors", "(", "geohash", ")", ":", "return", "{", "'n'", ":", "adjacent", "(", "geohash", ",", "'n'", ")", ",", "'ne'", ":", "adjacent", "(", "adjacent", "(", "geohash", ",", "'n'", ")", ",", "'e'", ")", ",", "'e'", ":", "adjacent", "(", "geohash", ",", "'e'", ")", ",", "'se'", ":", "adjacent", "(", "adjacent", "(", "geohash", ",", "'s'", ")", ",", "'e'", ")", ",", "'s'", ":", "adjacent", "(", "geohash", ",", "'s'", ")", ",", "'sw'", ":", "adjacent", "(", "adjacent", "(", "geohash", ",", "'s'", ")", ",", "'w'", ")", ",", "'w'", ":", "adjacent", "(", "geohash", ",", "'w'", ")", ",", "'nw'", ":", "adjacent", "(", "adjacent", "(", "geohash", ",", "'n'", ")", ",", "'w'", ")", ",", "'c'", ":", "geohash", "}" ]
Return all neighboring geohashes.
[ "Return", "all", "neighboring", "geohashes", "." ]
train
https://github.com/transitland/mapzen-geohash/blob/5264d16dc2df00ebc4aedaa78ca8c6ee3fd655ad/mzgeohash/geohash.py#L110-L122
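neighbors simply composes adjacent into the 3x3 block around a cell (plus 'c' for the centre), the usual building block for prefix-based proximity queries:

    from mzgeohash import neighbors

    block = neighbors('ezs42')
    print(sorted(block.keys()))    # ['c', 'e', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w']
    candidate_cells = set(block.values())   # nine prefixes to search against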
SiLab-Bonn/pyBAR
pybar/run_manager.py
thunkify
def thunkify(thread_name=None, daemon=True, default_func=None): '''Make a function immediately return a function of no args which, when called, waits for the result, which will start being processed in another thread. Taken from https://wiki.python.org/moin/PythonDecoratorLibrary. ''' def actual_decorator(f): @functools.wraps(f) def thunked(*args, **kwargs): result = [None] exc = [False, None] # has exception?, exception info # wait_event = threading.Event() def worker_func(): try: func_result = f(*args, **kwargs) result[0] = func_result except Exception: exc[0] = True exc[1] = sys.exc_info() logging.error("%s has thrown an exception:\n%s", thread_name, traceback.format_exc()) # finally: # wait_event.set() worker_thread = Thread(target=worker_func, name=thread_name if thread_name else None) worker_thread.daemon = daemon def thunk(timeout=None): # avoid blocking MainThread start_time = time() while True: worker_thread.join(timeout=0.1) if (timeout and timeout < time() - start_time) or not worker_thread.is_alive(): break # worker_thread.join(timeout=timeout) # wait_event.wait() if worker_thread.is_alive(): if default_func is None: return else: return default_func() if exc[0]: raise exc[1][0], exc[1][1], exc[1][2] return result[0] worker_thread.start() # threading.Thread(target=worker_func, name=thread_name if thread_name else None).start() return thunk return thunked return actual_decorator
python
def thunkify(thread_name=None, daemon=True, default_func=None): '''Make a function immediately return a function of no args which, when called, waits for the result, which will start being processed in another thread. Taken from https://wiki.python.org/moin/PythonDecoratorLibrary. ''' def actual_decorator(f): @functools.wraps(f) def thunked(*args, **kwargs): result = [None] exc = [False, None] # has exception?, exception info # wait_event = threading.Event() def worker_func(): try: func_result = f(*args, **kwargs) result[0] = func_result except Exception: exc[0] = True exc[1] = sys.exc_info() logging.error("%s has thrown an exception:\n%s", thread_name, traceback.format_exc()) # finally: # wait_event.set() worker_thread = Thread(target=worker_func, name=thread_name if thread_name else None) worker_thread.daemon = daemon def thunk(timeout=None): # avoid blocking MainThread start_time = time() while True: worker_thread.join(timeout=0.1) if (timeout and timeout < time() - start_time) or not worker_thread.is_alive(): break # worker_thread.join(timeout=timeout) # wait_event.wait() if worker_thread.is_alive(): if default_func is None: return else: return default_func() if exc[0]: raise exc[1][0], exc[1][1], exc[1][2] return result[0] worker_thread.start() # threading.Thread(target=worker_func, name=thread_name if thread_name else None).start() return thunk return thunked return actual_decorator
[ "def", "thunkify", "(", "thread_name", "=", "None", ",", "daemon", "=", "True", ",", "default_func", "=", "None", ")", ":", "def", "actual_decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "thunked", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "[", "None", "]", "exc", "=", "[", "False", ",", "None", "]", "# has exception?, exception info", "# wait_event = threading.Event()", "def", "worker_func", "(", ")", ":", "try", ":", "func_result", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "result", "[", "0", "]", "=", "func_result", "except", "Exception", ":", "exc", "[", "0", "]", "=", "True", "exc", "[", "1", "]", "=", "sys", ".", "exc_info", "(", ")", "logging", ".", "error", "(", "\"%s has thrown an exception:\\n%s\"", ",", "thread_name", ",", "traceback", ".", "format_exc", "(", ")", ")", "# finally:", "# wait_event.set()", "worker_thread", "=", "Thread", "(", "target", "=", "worker_func", ",", "name", "=", "thread_name", "if", "thread_name", "else", "None", ")", "worker_thread", ".", "daemon", "=", "daemon", "def", "thunk", "(", "timeout", "=", "None", ")", ":", "# avoid blocking MainThread", "start_time", "=", "time", "(", ")", "while", "True", ":", "worker_thread", ".", "join", "(", "timeout", "=", "0.1", ")", "if", "(", "timeout", "and", "timeout", "<", "time", "(", ")", "-", "start_time", ")", "or", "not", "worker_thread", ".", "is_alive", "(", ")", ":", "break", "# worker_thread.join(timeout=timeout)", "# wait_event.wait()", "if", "worker_thread", ".", "is_alive", "(", ")", ":", "if", "default_func", "is", "None", ":", "return", "else", ":", "return", "default_func", "(", ")", "if", "exc", "[", "0", "]", ":", "raise", "exc", "[", "1", "]", "[", "0", "]", ",", "exc", "[", "1", "]", "[", "1", "]", ",", "exc", "[", "1", "]", "[", "2", "]", "return", "result", "[", "0", "]", "worker_thread", ".", "start", "(", ")", "# threading.Thread(target=worker_func, name=thread_name if thread_name else None).start()", "return", "thunk", "return", "thunked", "return", "actual_decorator" ]
Make a function immediately return a function of no args which, when called, waits for the result, which will start being processed in another thread. Taken from https://wiki.python.org/moin/PythonDecoratorLibrary.
[ "Make", "a", "function", "immediately", "return", "a", "function", "of", "no", "args", "which", "when", "called", "waits", "for", "the", "result", "which", "will", "start", "being", "processed", "in", "another", "thread", ".", "Taken", "from", "https", ":", "//", "wiki", ".", "python", ".", "org", "/", "moin", "/", "PythonDecoratorLibrary", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L362-L410
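A sketch of how the thunkify decorator above is meant to be used: the decorated call returns immediately with a thunk, and calling the thunk joins the worker thread and yields the result (or re-raises the worker's exception). The function name here is illustrative:

    import time

    @thunkify(thread_name='ExampleWorker', daemon=True)
    def slow_add(a, b):
        time.sleep(1.0)
        return a + b

    thunk = slow_add(2, 3)   # returns at once, work runs in 'ExampleWorker'
    # ... the main thread is free to do other work here ...
    print(thunk())           # blocks until the worker finishes, prints 5
    # thunk(timeout=0.1) would instead return default_func() (or None) if still running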
SiLab-Bonn/pyBAR
pybar/run_manager.py
set_event_when_keyboard_interrupt
def set_event_when_keyboard_interrupt(_lambda): '''Decorator function that sets Threading.Event() when keyboard interrupt (Ctrl+C) was raised Parameters ---------- _lambda : function Lambda function that points to Threading.Event() object Returns ------- wrapper : function Examples -------- @set_event_when_keyboard_interrupt(lambda x: x.stop_thread_event) def scan(self, **kwargs): # some code Note ---- Decorated functions cannot be derived. ''' def wrapper(f): @wraps(f) def wrapped_f(self, *f_args, **f_kwargs): try: f(self, *f_args, **f_kwargs) except KeyboardInterrupt: _lambda(self).set() # logging.info('Keyboard interrupt: setting %s' % _lambda(self).__name__) return wrapped_f return wrapper
python
def set_event_when_keyboard_interrupt(_lambda): '''Decorator function that sets Threading.Event() when keyboard interrupt (Ctrl+C) was raised Parameters ---------- _lambda : function Lambda function that points to Threading.Event() object Returns ------- wrapper : function Examples -------- @set_event_when_keyboard_interrupt(lambda x: x.stop_thread_event) def scan(self, **kwargs): # some code Note ---- Decorated functions cannot be derived. ''' def wrapper(f): @wraps(f) def wrapped_f(self, *f_args, **f_kwargs): try: f(self, *f_args, **f_kwargs) except KeyboardInterrupt: _lambda(self).set() # logging.info('Keyboard interrupt: setting %s' % _lambda(self).__name__) return wrapped_f return wrapper
[ "def", "set_event_when_keyboard_interrupt", "(", "_lambda", ")", ":", "def", "wrapper", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapped_f", "(", "self", ",", "*", "f_args", ",", "*", "*", "f_kwargs", ")", ":", "try", ":", "f", "(", "self", ",", "*", "f_args", ",", "*", "*", "f_kwargs", ")", "except", "KeyboardInterrupt", ":", "_lambda", "(", "self", ")", ".", "set", "(", ")", "# logging.info('Keyboard interrupt: setting %s' % _lambda(self).__name__)", "return", "wrapped_f", "return", "wrapper" ]
Decorator function that sets Threading.Event() when keyboard interrupt (Ctrl+C) was raised Parameters ---------- _lambda : function Lambda function that points to Threading.Event() object Returns ------- wrapper : function Examples -------- @set_event_when_keyboard_interrupt(lambda x: x.stop_thread_event) def scan(self, **kwargs): # some code Note ---- Decorated functions cannot be derived.
[ "Decorator", "function", "that", "sets", "Threading", ".", "Event", "()", "when", "keyboard", "interrupt", "(", "Ctrl", "+", "C", ")", "was", "raised" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L655-L686
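The docstring's usage example can be fleshed out into a minimal self-contained sketch; the class and attribute names are illustrative, not taken from pyBAR:

    import threading

    class ExampleScan(object):
        def __init__(self):
            self.stop_thread_event = threading.Event()

        @set_event_when_keyboard_interrupt(lambda self: self.stop_thread_event)
        def scan(self):
            # acquisition loop; Ctrl-C raises KeyboardInterrupt inside the loop,
            # the decorator catches it and sets stop_thread_event so that other
            # threads watching the event can shut down as well
            while not self.stop_thread_event.wait(0.5):
                pass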
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.run_id
def run_id(self): '''Run name without whitespace ''' s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
python
def run_id(self): '''Run name without whitespace ''' s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
[ "def", "run_id", "(", "self", ")", ":", "s1", "=", "re", ".", "sub", "(", "'(.)([A-Z][a-z]+)'", ",", "r'\\1_\\2'", ",", "self", ".", "__class__", ".", "__name__", ")", "return", "re", ".", "sub", "(", "'([a-z0-9])([A-Z])'", ",", "r'\\1_\\2'", ",", "s1", ")", ".", "lower", "(", ")" ]
Run name without whitespace
[ "Run", "name", "without", "whitespace" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L83-L87
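The two regular expressions convert the CamelCase class name into a snake_case run identifier; the same substitutions as a standalone check:

    import re

    def to_run_id(name):
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    print(to_run_id('ThresholdScan'))   # threshold_scan
    print(to_run_id('AnalogScan'))      # analog_scan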
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.conf
def conf(self): '''Configuration (namedtuple) ''' conf = namedtuple('conf', field_names=self._conf.keys()) return conf(**self._conf)
python
def conf(self): '''Configuration (namedtuple) ''' conf = namedtuple('conf', field_names=self._conf.keys()) return conf(**self._conf)
[ "def", "conf", "(", "self", ")", ":", "conf", "=", "namedtuple", "(", "'conf'", ",", "field_names", "=", "self", ".", "_conf", ".", "keys", "(", ")", ")", "return", "conf", "(", "*", "*", "self", ".", "_conf", ")" ]
Configuration (namedtuple)
[ "Configuration", "(", "namedtuple", ")" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L90-L94
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.run_conf
def run_conf(self): '''Run configuration (namedtuple) ''' run_conf = namedtuple('run_conf', field_names=self._run_conf.keys()) return run_conf(**self._run_conf)
python
def run_conf(self): '''Run configuration (namedtuple) ''' run_conf = namedtuple('run_conf', field_names=self._run_conf.keys()) return run_conf(**self._run_conf)
[ "def", "run_conf", "(", "self", ")", ":", "run_conf", "=", "namedtuple", "(", "'run_conf'", ",", "field_names", "=", "self", ".", "_run_conf", ".", "keys", "(", ")", ")", "return", "run_conf", "(", "*", "*", "self", ".", "_run_conf", ")" ]
Run configuration (namedtuple)
[ "Run", "configuration", "(", "namedtuple", ")" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L97-L101
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.default_run_conf
def default_run_conf(self): '''Default run configuration (namedtuple) ''' default_run_conf = namedtuple('default_run_conf', field_names=self._default_run_conf.keys()) return default_run_conf(**self._default_run_conf)
python
def default_run_conf(self): '''Default run configuration (namedtuple) ''' default_run_conf = namedtuple('default_run_conf', field_names=self._default_run_conf.keys()) return default_run_conf(**self._default_run_conf)
[ "def", "default_run_conf", "(", "self", ")", ":", "default_run_conf", "=", "namedtuple", "(", "'default_run_conf'", ",", "field_names", "=", "self", ".", "_default_run_conf", ".", "keys", "(", ")", ")", "return", "default_run_conf", "(", "*", "*", "self", ".", "_default_run_conf", ")" ]
Default run configuration (namedtuple)
[ "Default", "run", "configuration", "(", "namedtuple", ")" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L110-L114
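conf, run_conf and default_run_conf all follow the same idiom: expose an internal dict as a read-only namedtuple built on the fly. The idiom in isolation, with hypothetical keys:

    from collections import namedtuple

    _run_conf = {'n_injections': 100, 'mask_steps': 3}
    run_conf = namedtuple('run_conf', field_names=_run_conf.keys())(**_run_conf)

    print(run_conf.n_injections)   # attribute access instead of key lookup
    # assigning run_conf.n_injections = 200 would raise AttributeError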
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase._init
def _init(self, run_conf, run_number=None): '''Initialization before a new run. ''' self.stop_run.clear() self.abort_run.clear() self._run_status = run_status.running self._write_run_number(run_number) self._init_run_conf(run_conf)
python
def _init(self, run_conf, run_number=None): '''Initialization before a new run. ''' self.stop_run.clear() self.abort_run.clear() self._run_status = run_status.running self._write_run_number(run_number) self._init_run_conf(run_conf)
[ "def", "_init", "(", "self", ",", "run_conf", ",", "run_number", "=", "None", ")", ":", "self", ".", "stop_run", ".", "clear", "(", ")", "self", ".", "abort_run", ".", "clear", "(", ")", "self", ".", "_run_status", "=", "run_status", ".", "running", "self", ".", "_write_run_number", "(", "run_number", ")", "self", ".", "_init_run_conf", "(", "run_conf", ")" ]
Initialization before a new run.
[ "Initialization", "before", "a", "new", "run", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L188-L195
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.connect_cancel
def connect_cancel(self, functions): '''Run given functions when a run is cancelled. ''' self._cancel_functions = [] for func in functions: if isinstance(func, basestring) and hasattr(self, func) and callable(getattr(self, func)): self._cancel_functions.append(getattr(self, func)) elif callable(func): self._cancel_functions.append(func) else: raise ValueError("Unknown function %s" % str(func))
python
def connect_cancel(self, functions): '''Run given functions when a run is cancelled. ''' self._cancel_functions = [] for func in functions: if isinstance(func, basestring) and hasattr(self, func) and callable(getattr(self, func)): self._cancel_functions.append(getattr(self, func)) elif callable(func): self._cancel_functions.append(func) else: raise ValueError("Unknown function %s" % str(func))
[ "def", "connect_cancel", "(", "self", ",", "functions", ")", ":", "self", ".", "_cancel_functions", "=", "[", "]", "for", "func", "in", "functions", ":", "if", "isinstance", "(", "func", ",", "basestring", ")", "and", "hasattr", "(", "self", ",", "func", ")", "and", "callable", "(", "getattr", "(", "self", ",", "func", ")", ")", ":", "self", ".", "_cancel_functions", ".", "append", "(", "getattr", "(", "self", ",", "func", ")", ")", "elif", "callable", "(", "func", ")", ":", "self", ".", "_cancel_functions", ".", "append", "(", "func", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown function %s\"", "%", "str", "(", "func", ")", ")" ]
Run given functions when a run is cancelled.
[ "Run", "given", "functions", "when", "a", "run", "is", "cancelled", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L248-L258
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.handle_cancel
def handle_cancel(self, **kwargs): '''Cancelling a run. ''' for func in self._cancel_functions: f_args = getargspec(func)[0] f_kwargs = {key: kwargs[key] for key in f_args if key in kwargs} func(**f_kwargs)
python
def handle_cancel(self, **kwargs): '''Cancelling a run. ''' for func in self._cancel_functions: f_args = getargspec(func)[0] f_kwargs = {key: kwargs[key] for key in f_args if key in kwargs} func(**f_kwargs)
[ "def", "handle_cancel", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "func", "in", "self", ".", "_cancel_functions", ":", "f_args", "=", "getargspec", "(", "func", ")", "[", "0", "]", "f_kwargs", "=", "{", "key", ":", "kwargs", "[", "key", "]", "for", "key", "in", "f_args", "if", "key", "in", "kwargs", "}", "func", "(", "*", "*", "f_kwargs", ")" ]
Cancelling a run.
[ "Cancelling", "a", "run", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L260-L266
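connect_cancel and handle_cancel together form a small callback registry in which each callback receives only the keyword arguments it declares. The same dispatch in isolation (using inspect.getfullargspec on Python 3 where the original uses getargspec):

    from inspect import getfullargspec

    def on_cancel(msg):
        print('cancelled:', msg)

    def on_cancel_verbose(msg, run_number):
        print('cancelled run', run_number, ':', msg)

    callbacks = [on_cancel, on_cancel_verbose]
    kwargs = {'msg': 'user abort', 'run_number': 42, 'unused': None}
    for func in callbacks:
        f_args = getfullargspec(func)[0]
        func(**{key: kwargs[key] for key in f_args if key in kwargs})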
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.stop
def stop(self, msg=None): '''Stopping a run. Control for loops. Gentle stop/abort. This event should provide a more gentle abort. The run should stop ASAP but the run is still considered complete. ''' if not self.stop_run.is_set(): if msg: logging.info('%s%s Stopping run...', msg, ('' if msg[-1] in punctuation else '.')) else: logging.info('Stopping run...') self.stop_run.set()
python
def stop(self, msg=None): '''Stopping a run. Control for loops. Gentle stop/abort. This event should provide a more gentle abort. The run should stop ASAP but the run is still considered complete. ''' if not self.stop_run.is_set(): if msg: logging.info('%s%s Stopping run...', msg, ('' if msg[-1] in punctuation else '.')) else: logging.info('Stopping run...') self.stop_run.set()
[ "def", "stop", "(", "self", ",", "msg", "=", "None", ")", ":", "if", "not", "self", ".", "stop_run", ".", "is_set", "(", ")", ":", "if", "msg", ":", "logging", ".", "info", "(", "'%s%s Stopping run...'", ",", "msg", ",", "(", "''", "if", "msg", "[", "-", "1", "]", "in", "punctuation", "else", "'.'", ")", ")", "else", ":", "logging", ".", "info", "(", "'Stopping run...'", ")", "self", ".", "stop_run", ".", "set", "(", ")" ]
Stopping a run. Control for loops. Gentle stop/abort. This event should provide a more gentle abort. The run should stop ASAP but the run is still considered complete.
[ "Stopping", "a", "run", ".", "Control", "for", "loops", ".", "Gentle", "stop", "/", "abort", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L268-L278
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunBase.abort
def abort(self, msg=None): '''Aborting a run. Control for loops. Immediate stop/abort. The implementation should stop a run ASAP when this event is set. The run is considered incomplete. ''' if not self.abort_run.is_set(): if msg: logging.error('%s%s Aborting run...', msg, ('' if msg[-1] in punctuation else '.')) else: logging.error('Aborting run...') self.abort_run.set() self.stop_run.set()
python
def abort(self, msg=None): '''Aborting a run. Control for loops. Immediate stop/abort. The implementation should stop a run ASAP when this event is set. The run is considered incomplete. ''' if not self.abort_run.is_set(): if msg: logging.error('%s%s Aborting run...', msg, ('' if msg[-1] in punctuation else '.')) else: logging.error('Aborting run...') self.abort_run.set() self.stop_run.set()
[ "def", "abort", "(", "self", ",", "msg", "=", "None", ")", ":", "if", "not", "self", ".", "abort_run", ".", "is_set", "(", ")", ":", "if", "msg", ":", "logging", ".", "error", "(", "'%s%s Aborting run...'", ",", "msg", ",", "(", "''", "if", "msg", "[", "-", "1", "]", "in", "punctuation", "else", "'.'", ")", ")", "else", ":", "logging", ".", "error", "(", "'Aborting run...'", ")", "self", ".", "abort_run", ".", "set", "(", ")", "self", ".", "stop_run", ".", "set", "(", ")" ]
Aborting a run. Control for loops. Immediate stop/abort. The implementation should stop a run ASAP when this event is set. The run is considered incomplete.
[ "Aborting", "a", "run", ".", "Control", "for", "loops", ".", "Immediate", "stop", "/", "abort", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L280-L291
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunManager.run_run
def run_run(self, run, conf=None, run_conf=None, use_thread=False, catch_exception=True): '''Runs a run in another thread. Non-blocking. Parameters ---------- run : class, object Run class or object. run_conf : str, dict, file Specific configuration for the run. use_thread : bool If True, run run in thread and returns blocking function. Returns ------- If use_thread is True, returns function, which blocks until thread terminates, and which itself returns run status. If use_thread is False, returns run status. ''' if isinstance(conf, basestring) and os.path.isfile(conf): logging.info('Updating configuration from file %s', os.path.abspath(conf)) elif conf is not None: logging.info('Updating configuration') conf = self.open_conf(conf) self._conf.update(conf) if isclass(run): # instantiate the class run = run(conf=self._conf) local_run_conf = {} # general parameters from conf if 'run_conf' in self._conf: logging.info('Updating run configuration using run_conf key from configuration') local_run_conf.update(self._conf['run_conf']) # check for class name, scan specific parameters from conf if run.__class__.__name__ in self._conf: logging.info('Updating run configuration using %s key from configuration' % (run.__class__.__name__,)) local_run_conf.update(self._conf[run.__class__.__name__]) if isinstance(run_conf, basestring) and os.path.isfile(run_conf): logging.info('Updating run configuration from file %s', os.path.abspath(run_conf)) elif run_conf is not None: logging.info('Updating run configuration') run_conf = self.open_conf(run_conf) # check for class name, scan specific parameters from conf if run.__class__.__name__ in run_conf: run_conf = run_conf[run.__class__.__name__] # run_conf parameter has highest priority, updated last local_run_conf.update(run_conf) if use_thread: self.current_run = run @thunkify(thread_name='RunThread', daemon=True, default_func=self.current_run.get_run_status) def run_run_in_thread(): return run.run(run_conf=local_run_conf) signal.signal(signal.SIGINT, self._signal_handler) logging.info('Press Ctrl-C to stop run') return run_run_in_thread() else: self.current_run = run status = run.run(run_conf=local_run_conf) if not catch_exception and status != run_status.finished: raise RuntimeError('Exception occurred. Please read the log.') return status
python
def run_run(self, run, conf=None, run_conf=None, use_thread=False, catch_exception=True): '''Runs a run in another thread. Non-blocking. Parameters ---------- run : class, object Run class or object. run_conf : str, dict, file Specific configuration for the run. use_thread : bool If True, run run in thread and returns blocking function. Returns ------- If use_thread is True, returns function, which blocks until thread terminates, and which itself returns run status. If use_thread is False, returns run status. ''' if isinstance(conf, basestring) and os.path.isfile(conf): logging.info('Updating configuration from file %s', os.path.abspath(conf)) elif conf is not None: logging.info('Updating configuration') conf = self.open_conf(conf) self._conf.update(conf) if isclass(run): # instantiate the class run = run(conf=self._conf) local_run_conf = {} # general parameters from conf if 'run_conf' in self._conf: logging.info('Updating run configuration using run_conf key from configuration') local_run_conf.update(self._conf['run_conf']) # check for class name, scan specific parameters from conf if run.__class__.__name__ in self._conf: logging.info('Updating run configuration using %s key from configuration' % (run.__class__.__name__,)) local_run_conf.update(self._conf[run.__class__.__name__]) if isinstance(run_conf, basestring) and os.path.isfile(run_conf): logging.info('Updating run configuration from file %s', os.path.abspath(run_conf)) elif run_conf is not None: logging.info('Updating run configuration') run_conf = self.open_conf(run_conf) # check for class name, scan specific parameters from conf if run.__class__.__name__ in run_conf: run_conf = run_conf[run.__class__.__name__] # run_conf parameter has highest priority, updated last local_run_conf.update(run_conf) if use_thread: self.current_run = run @thunkify(thread_name='RunThread', daemon=True, default_func=self.current_run.get_run_status) def run_run_in_thread(): return run.run(run_conf=local_run_conf) signal.signal(signal.SIGINT, self._signal_handler) logging.info('Press Ctrl-C to stop run') return run_run_in_thread() else: self.current_run = run status = run.run(run_conf=local_run_conf) if not catch_exception and status != run_status.finished: raise RuntimeError('Exception occurred. Please read the log.') return status
[ "def", "run_run", "(", "self", ",", "run", ",", "conf", "=", "None", ",", "run_conf", "=", "None", ",", "use_thread", "=", "False", ",", "catch_exception", "=", "True", ")", ":", "if", "isinstance", "(", "conf", ",", "basestring", ")", "and", "os", ".", "path", ".", "isfile", "(", "conf", ")", ":", "logging", ".", "info", "(", "'Updating configuration from file %s'", ",", "os", ".", "path", ".", "abspath", "(", "conf", ")", ")", "elif", "conf", "is", "not", "None", ":", "logging", ".", "info", "(", "'Updating configuration'", ")", "conf", "=", "self", ".", "open_conf", "(", "conf", ")", "self", ".", "_conf", ".", "update", "(", "conf", ")", "if", "isclass", "(", "run", ")", ":", "# instantiate the class", "run", "=", "run", "(", "conf", "=", "self", ".", "_conf", ")", "local_run_conf", "=", "{", "}", "# general parameters from conf", "if", "'run_conf'", "in", "self", ".", "_conf", ":", "logging", ".", "info", "(", "'Updating run configuration using run_conf key from configuration'", ")", "local_run_conf", ".", "update", "(", "self", ".", "_conf", "[", "'run_conf'", "]", ")", "# check for class name, scan specific parameters from conf", "if", "run", ".", "__class__", ".", "__name__", "in", "self", ".", "_conf", ":", "logging", ".", "info", "(", "'Updating run configuration using %s key from configuration'", "%", "(", "run", ".", "__class__", ".", "__name__", ",", ")", ")", "local_run_conf", ".", "update", "(", "self", ".", "_conf", "[", "run", ".", "__class__", ".", "__name__", "]", ")", "if", "isinstance", "(", "run_conf", ",", "basestring", ")", "and", "os", ".", "path", ".", "isfile", "(", "run_conf", ")", ":", "logging", ".", "info", "(", "'Updating run configuration from file %s'", ",", "os", ".", "path", ".", "abspath", "(", "run_conf", ")", ")", "elif", "run_conf", "is", "not", "None", ":", "logging", ".", "info", "(", "'Updating run configuration'", ")", "run_conf", "=", "self", ".", "open_conf", "(", "run_conf", ")", "# check for class name, scan specific parameters from conf", "if", "run", ".", "__class__", ".", "__name__", "in", "run_conf", ":", "run_conf", "=", "run_conf", "[", "run", ".", "__class__", ".", "__name__", "]", "# run_conf parameter has highest priority, updated last", "local_run_conf", ".", "update", "(", "run_conf", ")", "if", "use_thread", ":", "self", ".", "current_run", "=", "run", "@", "thunkify", "(", "thread_name", "=", "'RunThread'", ",", "daemon", "=", "True", ",", "default_func", "=", "self", ".", "current_run", ".", "get_run_status", ")", "def", "run_run_in_thread", "(", ")", ":", "return", "run", ".", "run", "(", "run_conf", "=", "local_run_conf", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "self", ".", "_signal_handler", ")", "logging", ".", "info", "(", "'Press Ctrl-C to stop run'", ")", "return", "run_run_in_thread", "(", ")", "else", ":", "self", ".", "current_run", "=", "run", "status", "=", "run", ".", "run", "(", "run_conf", "=", "local_run_conf", ")", "if", "not", "catch_exception", "and", "status", "!=", "run_status", ".", "finished", ":", "raise", "RuntimeError", "(", "'Exception occurred. Please read the log.'", ")", "return", "status" ]
Runs a run in another thread. Non-blocking. Parameters ---------- run : class, object Run class or object. run_conf : str, dict, file Specific configuration for the run. use_thread : bool If True, run run in thread and returns blocking function. Returns ------- If use_thread is True, returns function, which blocks until thread terminates, and which itself returns run status. If use_thread is False, returns run status.
[ "Runs", "a", "run", "in", "another", "thread", ".", "Non", "-", "blocking", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L513-L578
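Typical use of run_run as documented above, either blocking or via the threaded thunk. The configuration file name and the ThresholdScan import follow common pyBAR conventions but are illustrative, not verified here:

    from pybar.run_manager import RunManager
    from pybar.scans.scan_threshold import ThresholdScan   # illustrative scan class

    runmngr = RunManager('configuration.yaml')   # placeholder configuration file

    # blocking call, returns a run_status value
    status = runmngr.run_run(run=ThresholdScan, run_conf={'n_injections': 100})

    # threaded call: returns a join function; Ctrl-C stops the run
    join = runmngr.run_run(run=ThresholdScan, use_thread=True)
    status = join()   # or join(timeout=...) to poll the current run status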
SiLab-Bonn/pyBAR
pybar/run_manager.py
RunManager.run_primlist
def run_primlist(self, primlist, skip_remaining=False): '''Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value> ''' runlist = self.open_primlist(primlist) for index, run in enumerate(runlist): logging.info('Progressing with run %i out of %i...', index + 1, len(runlist)) join = self.run_run(run, use_thread=True) status = join() if skip_remaining and not status == run_status.finished: logging.error('Exited run %i with status %s: Skipping all remaining runs.', run.run_number, status) break
python
def run_primlist(self, primlist, skip_remaining=False): '''Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value> ''' runlist = self.open_primlist(primlist) for index, run in enumerate(runlist): logging.info('Progressing with run %i out of %i...', index + 1, len(runlist)) join = self.run_run(run, use_thread=True) status = join() if skip_remaining and not status == run_status.finished: logging.error('Exited run %i with status %s: Skipping all remaining runs.', run.run_number, status) break
[ "def", "run_primlist", "(", "self", ",", "primlist", ",", "skip_remaining", "=", "False", ")", ":", "runlist", "=", "self", ".", "open_primlist", "(", "primlist", ")", "for", "index", ",", "run", "in", "enumerate", "(", "runlist", ")", ":", "logging", ".", "info", "(", "'Progressing with run %i out of %i...'", ",", "index", "+", "1", ",", "len", "(", "runlist", ")", ")", "join", "=", "self", ".", "run_run", "(", "run", ",", "use_thread", "=", "True", ")", "status", "=", "join", "(", ")", "if", "skip_remaining", "and", "not", "status", "==", "run_status", ".", "finished", ":", "logging", ".", "error", "(", "'Exited run %i with status %s: Skipping all remaining runs.'", ",", "run", ".", "run_number", ",", "status", ")", "break" ]
Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value>
[ "Runs", "runs", "from", "a", "primlist", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/run_manager.py#L580-L602
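Continuing the previous sketch, the primlist format described in the docstring as a concrete (hypothetical) file plus the call that consumes it:

    # contents of example.primlist -- one run per line, '#' starts a comment,
    # dot-notation module or class followed by ';'-separated run parameters:
    #   pybar.scans.scan_analog.AnalogScan; n_injections=100
    #   pybar.scans.scan_threshold.ThresholdScan; n_injections=100; mask_steps=3

    runmngr.run_primlist('example.primlist', skip_remaining=True)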
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
analyze_beam_spot
def analyze_beam_spot(scan_base, combine_n_readouts=1000, chunk_size=10000000, plot_occupancy_hists=False, output_pdf=None, output_file=None): ''' Determines the mean x and y beam spot position as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The occupancy is determined for the given combined events and stored into a pdf file. At the end the beam x and y is plotted into a scatter plot with absolute positions in um. Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ] combine_n_readouts: int the number of read outs to combine (e.g. 1000) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen ''' time_stamp = [] x = [] y = [] for data_file in scan_base: with tb.open_file(data_file + '_interpreted.h5', mode="r+") as in_hit_file_h5: # get data and data pointer meta_data_array = in_hit_file_h5.root.meta_data[:] hit_table = in_hit_file_h5.root.Hits # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number) parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts]))) # create a event_numer index (important) analysis_utils.index_event_number(hit_table) # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_tot_hist = False analyze_data.create_bcid_hist = False analyze_data.histogram.set_no_scan_parameter() # variables for read speed up index = 0 # index where to start the read out, 0 at the beginning, increased during looping best_chunk_size = chunk_size progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80) progress_bar.start() # loop over the selected events for parameter_index, parameter_range in enumerate(parameter_ranges): logging.debug('Analyze time stamp ' + str(parameter_range[0]) + ' and data from events = [' + str(parameter_range[2]) + ',' + str(parameter_range[3]) + '[ ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%') analyze_data.reset() # resets the data of the last analysis # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=parameter_range[2], stop_event_number=parameter_range[3], start_index=index, chunk_size=best_chunk_size): analyze_data.analyze_hits(hits) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] progress_bar.update(index) best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction # get and store results occupancy_array = analyze_data.histogram.get_occupancy() projection_x = np.sum(occupancy_array, axis=0).ravel() projection_y = np.sum(occupancy_array, axis=1).ravel() x.append(analysis_utils.get_mean_from_histogram(projection_x, bin_positions=range(0, 80))) 
y.append(analysis_utils.get_mean_from_histogram(projection_y, bin_positions=range(0, 336))) time_stamp.append(parameter_range[0]) if plot_occupancy_hists: plotting.plot_occupancy(occupancy_array[:, :, 0], title='Occupancy for events between ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[0])) + ' and ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[1])), filename=output_pdf) progress_bar.finish() plotting.plot_scatter([i * 250 for i in x], [i * 50 for i in y], title='Mean beam position', x_label='x [um]', y_label='y [um]', marker_style='-o', filename=output_pdf) if output_file: with tb.open_file(output_file, mode="a") as out_file_h5: rec_array = np.array(zip(time_stamp, x, y), dtype=[('time_stamp', float), ('x', float), ('y', float)]) try: beam_spot_table = out_file_h5.create_table(out_file_h5.root, name='Beamspot', description=rec_array, title='Beam spot position', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) beam_spot_table[:] = rec_array except tb.exceptions.NodeError: logging.warning(output_file + ' has already a Beamspot note, do not overwrite existing.') return time_stamp, x, y
python
def analyze_beam_spot(scan_base, combine_n_readouts=1000, chunk_size=10000000, plot_occupancy_hists=False, output_pdf=None, output_file=None): ''' Determines the mean x and y beam spot position as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The occupancy is determined for the given combined events and stored into a pdf file. At the end the beam x and y is plotted into a scatter plot with absolute positions in um. Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ] combine_n_readouts: int the number of read outs to combine (e.g. 1000) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen ''' time_stamp = [] x = [] y = [] for data_file in scan_base: with tb.open_file(data_file + '_interpreted.h5', mode="r+") as in_hit_file_h5: # get data and data pointer meta_data_array = in_hit_file_h5.root.meta_data[:] hit_table = in_hit_file_h5.root.Hits # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number) parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts]))) # create a event_numer index (important) analysis_utils.index_event_number(hit_table) # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_tot_hist = False analyze_data.create_bcid_hist = False analyze_data.histogram.set_no_scan_parameter() # variables for read speed up index = 0 # index where to start the read out, 0 at the beginning, increased during looping best_chunk_size = chunk_size progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80) progress_bar.start() # loop over the selected events for parameter_index, parameter_range in enumerate(parameter_ranges): logging.debug('Analyze time stamp ' + str(parameter_range[0]) + ' and data from events = [' + str(parameter_range[2]) + ',' + str(parameter_range[3]) + '[ ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%') analyze_data.reset() # resets the data of the last analysis # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=parameter_range[2], stop_event_number=parameter_range[3], start_index=index, chunk_size=best_chunk_size): analyze_data.analyze_hits(hits) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] progress_bar.update(index) best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction # get and store results occupancy_array = analyze_data.histogram.get_occupancy() projection_x = np.sum(occupancy_array, axis=0).ravel() projection_y = np.sum(occupancy_array, axis=1).ravel() x.append(analysis_utils.get_mean_from_histogram(projection_x, bin_positions=range(0, 80))) 
y.append(analysis_utils.get_mean_from_histogram(projection_y, bin_positions=range(0, 336))) time_stamp.append(parameter_range[0]) if plot_occupancy_hists: plotting.plot_occupancy(occupancy_array[:, :, 0], title='Occupancy for events between ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[0])) + ' and ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[1])), filename=output_pdf) progress_bar.finish() plotting.plot_scatter([i * 250 for i in x], [i * 50 for i in y], title='Mean beam position', x_label='x [um]', y_label='y [um]', marker_style='-o', filename=output_pdf) if output_file: with tb.open_file(output_file, mode="a") as out_file_h5: rec_array = np.array(zip(time_stamp, x, y), dtype=[('time_stamp', float), ('x', float), ('y', float)]) try: beam_spot_table = out_file_h5.create_table(out_file_h5.root, name='Beamspot', description=rec_array, title='Beam spot position', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) beam_spot_table[:] = rec_array except tb.exceptions.NodeError: logging.warning(output_file + ' has already a Beamspot note, do not overwrite existing.') return time_stamp, x, y
[ "def", "analyze_beam_spot", "(", "scan_base", ",", "combine_n_readouts", "=", "1000", ",", "chunk_size", "=", "10000000", ",", "plot_occupancy_hists", "=", "False", ",", "output_pdf", "=", "None", ",", "output_file", "=", "None", ")", ":", "time_stamp", "=", "[", "]", "x", "=", "[", "]", "y", "=", "[", "]", "for", "data_file", "in", "scan_base", ":", "with", "tb", ".", "open_file", "(", "data_file", "+", "'_interpreted.h5'", ",", "mode", "=", "\"r+\"", ")", "as", "in_hit_file_h5", ":", "# get data and data pointer", "meta_data_array", "=", "in_hit_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "hit_table", "=", "in_hit_file_h5", ".", "root", ".", "Hits", "# determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)", "parameter_ranges", "=", "np", ".", "column_stack", "(", "(", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'timestamp_start'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ",", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'event_number'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ")", ")", "# create a event_numer index (important)", "analysis_utils", ".", "index_event_number", "(", "hit_table", ")", "# initialize the analysis and set settings", "analyze_data", "=", "AnalyzeRawData", "(", ")", "analyze_data", ".", "create_tot_hist", "=", "False", "analyze_data", ".", "create_bcid_hist", "=", "False", "analyze_data", ".", "histogram", ".", "set_no_scan_parameter", "(", ")", "# variables for read speed up", "index", "=", "0", "# index where to start the read out, 0 at the beginning, increased during looping", "best_chunk_size", "=", "chunk_size", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "hit_table", ".", "shape", "[", "0", "]", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "# loop over the selected events", "for", "parameter_index", ",", "parameter_range", "in", "enumerate", "(", "parameter_ranges", ")", ":", "logging", ".", "debug", "(", "'Analyze time stamp '", "+", "str", "(", "parameter_range", "[", "0", "]", ")", "+", "' and data from events = ['", "+", "str", "(", "parameter_range", "[", "2", "]", ")", "+", "','", "+", "str", "(", "parameter_range", "[", "3", "]", ")", "+", "'[ '", "+", "str", "(", "int", "(", "float", "(", "float", "(", "parameter_index", ")", "/", "float", "(", "len", "(", "parameter_ranges", ")", ")", "*", "100.0", ")", ")", ")", "+", "'%'", ")", "analyze_data", ".", "reset", "(", ")", "# resets the data of the last analysis", "# loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given", "readout_hit_len", "=", "0", "# variable to calculate a optimal chunk size value from the number of hits for speed up", "for", "hits", ",", "index", "in", "analysis_utils", ".", "data_aligned_at_events", "(", "hit_table", ",", "start_event_number", "=", "parameter_range", "[", "2", "]", ",", "stop_event_number", "=", "parameter_range", "[", "3", "]", ",", "start_index", "=", "index", ",", "chunk_size", "=", "best_chunk_size", ")", ":", "analyze_data", ".", "analyze_hits", "(", "hits", ")", "# analyze the selected hits in chunks", "readout_hit_len", 
"+=", "hits", ".", "shape", "[", "0", "]", "progress_bar", ".", "update", "(", "index", ")", "best_chunk_size", "=", "int", "(", "1.5", "*", "readout_hit_len", ")", "if", "int", "(", "1.05", "*", "readout_hit_len", ")", "<", "chunk_size", "else", "chunk_size", "# to increase the readout speed, estimated the number of hits for one read instruction", "# get and store results", "occupancy_array", "=", "analyze_data", ".", "histogram", ".", "get_occupancy", "(", ")", "projection_x", "=", "np", ".", "sum", "(", "occupancy_array", ",", "axis", "=", "0", ")", ".", "ravel", "(", ")", "projection_y", "=", "np", ".", "sum", "(", "occupancy_array", ",", "axis", "=", "1", ")", ".", "ravel", "(", ")", "x", ".", "append", "(", "analysis_utils", ".", "get_mean_from_histogram", "(", "projection_x", ",", "bin_positions", "=", "range", "(", "0", ",", "80", ")", ")", ")", "y", ".", "append", "(", "analysis_utils", ".", "get_mean_from_histogram", "(", "projection_y", ",", "bin_positions", "=", "range", "(", "0", ",", "336", ")", ")", ")", "time_stamp", ".", "append", "(", "parameter_range", "[", "0", "]", ")", "if", "plot_occupancy_hists", ":", "plotting", ".", "plot_occupancy", "(", "occupancy_array", "[", ":", ",", ":", ",", "0", "]", ",", "title", "=", "'Occupancy for events between '", "+", "time", ".", "strftime", "(", "'%H:%M:%S'", ",", "time", ".", "localtime", "(", "parameter_range", "[", "0", "]", ")", ")", "+", "' and '", "+", "time", ".", "strftime", "(", "'%H:%M:%S'", ",", "time", ".", "localtime", "(", "parameter_range", "[", "1", "]", ")", ")", ",", "filename", "=", "output_pdf", ")", "progress_bar", ".", "finish", "(", ")", "plotting", ".", "plot_scatter", "(", "[", "i", "*", "250", "for", "i", "in", "x", "]", ",", "[", "i", "*", "50", "for", "i", "in", "y", "]", ",", "title", "=", "'Mean beam position'", ",", "x_label", "=", "'x [um]'", ",", "y_label", "=", "'y [um]'", ",", "marker_style", "=", "'-o'", ",", "filename", "=", "output_pdf", ")", "if", "output_file", ":", "with", "tb", ".", "open_file", "(", "output_file", ",", "mode", "=", "\"a\"", ")", "as", "out_file_h5", ":", "rec_array", "=", "np", ".", "array", "(", "zip", "(", "time_stamp", ",", "x", ",", "y", ")", ",", "dtype", "=", "[", "(", "'time_stamp'", ",", "float", ")", ",", "(", "'x'", ",", "float", ")", ",", "(", "'y'", ",", "float", ")", "]", ")", "try", ":", "beam_spot_table", "=", "out_file_h5", ".", "create_table", "(", "out_file_h5", ".", "root", ",", "name", "=", "'Beamspot'", ",", "description", "=", "rec_array", ",", "title", "=", "'Beam spot position'", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "beam_spot_table", "[", ":", "]", "=", "rec_array", "except", "tb", ".", "exceptions", ".", "NodeError", ":", "logging", ".", "warning", "(", "output_file", "+", "' has already a Beamspot note, do not overwrite existing.'", ")", "return", "time_stamp", ",", "x", ",", "y" ]
Determines the mean x and y beam spot position as a function of time. For this, the data of a fixed number of readouts is combined ('combine_n_readouts'). The occupancy is determined for the combined events and stored into a pdf file. At the end, the beam x and y positions are plotted into a scatter plot with absolute positions in um. Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ]) combine_n_readouts: int the number of readouts to combine (e.g. 1000) chunk_size: int the maximum chunk size used during read, if too big a memory error occurs, if too small the analysis takes longer output_pdf: PdfPages PdfPages file object, if None the plot is printed to screen
[ "Determines", "the", "mean", "x", "and", "y", "beam", "spot", "position", "as", "a", "function", "of", "time", ".", "Therefore", "the", "data", "of", "a", "fixed", "number", "of", "read", "outs", "are", "combined", "(", "combine_n_readouts", ")", ".", "The", "occupancy", "is", "determined", "for", "the", "given", "combined", "events", "and", "stored", "into", "a", "pdf", "file", ".", "At", "the", "end", "the", "beam", "x", "and", "y", "is", "plotted", "into", "a", "scatter", "plot", "with", "absolute", "positions", "in", "um", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L23-L99
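A minimal usage sketch for the analyze_beam_spot function of this record. It is not part of the original source: the import path is inferred from the func_code_url above, and the scan base name and output file names are hypothetical placeholders that must point to an existing '<scan_base>_interpreted.h5' file.

from matplotlib.backends.backend_pdf import PdfPages
from pybar.analysis.analysis import analyze_beam_spot  # module path inferred from the record URL

scan_bases = ['/data/SCC_50_fei4_self_trigger_scan_390']  # hypothetical scan base, '_interpreted.h5' is appended internally

with PdfPages('beam_spot.pdf') as output_pdf:  # occupancy and beam position plots go into this pdf
    time_stamp, x, y = analyze_beam_spot(scan_base=scan_bases,
                                         combine_n_readouts=1000,
                                         plot_occupancy_hists=False,
                                         output_pdf=output_pdf,
                                         output_file='beam_spot.h5')  # also stores a 'Beamspot' table
# time_stamp, x and y hold the mean beam position per combined readout block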
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
analyze_event_rate
def analyze_event_rate(scan_base, combine_n_readouts=1000, time_line_absolute=True, output_pdf=None, output_file=None): ''' Determines the number of events as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file. Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ] combine_n_readouts: int the number of read outs to combine (e.g. 1000) time_line_absolute: bool if true the analysis uses absolute time stamps output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen ''' time_stamp = [] rate = [] start_time_set = False for data_file in scan_base: with tb.open_file(data_file + '_interpreted.h5', mode="r") as in_file_h5: meta_data_array = in_file_h5.root.meta_data[:] parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts]))) if time_line_absolute: time_stamp.extend(parameter_ranges[:-1, 0]) else: if not start_time_set: start_time = parameter_ranges[0, 0] start_time_set = True time_stamp.extend((parameter_ranges[:-1, 0] - start_time) / 60.0) rate.extend((parameter_ranges[:-1, 3] - parameter_ranges[:-1, 2]) / (parameter_ranges[:-1, 1] - parameter_ranges[:-1, 0])) # d#Events / dt if time_line_absolute: plotting.plot_scatter_time(time_stamp, rate, title='Event rate [Hz]', marker_style='o', filename=output_pdf) else: plotting.plot_scatter(time_stamp, rate, title='Events per time', x_label='Progressed time [min.]', y_label='Events rate [Hz]', marker_style='o', filename=output_pdf) if output_file: with tb.open_file(output_file, mode="a") as out_file_h5: rec_array = np.array(zip(time_stamp, rate), dtype=[('time_stamp', float), ('rate', float)]).view(np.recarray) try: rate_table = out_file_h5.create_table(out_file_h5.root, name='Eventrate', description=rec_array, title='Event rate', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) rate_table[:] = rec_array except tb.exceptions.NodeError: logging.warning(output_file + ' has already a Eventrate note, do not overwrite existing.') return time_stamp, rate
python
def analyze_event_rate(scan_base, combine_n_readouts=1000, time_line_absolute=True, output_pdf=None, output_file=None): ''' Determines the number of events as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file. Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ] combine_n_readouts: int the number of read outs to combine (e.g. 1000) time_line_absolute: bool if true the analysis uses absolute time stamps output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen ''' time_stamp = [] rate = [] start_time_set = False for data_file in scan_base: with tb.open_file(data_file + '_interpreted.h5', mode="r") as in_file_h5: meta_data_array = in_file_h5.root.meta_data[:] parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts]))) if time_line_absolute: time_stamp.extend(parameter_ranges[:-1, 0]) else: if not start_time_set: start_time = parameter_ranges[0, 0] start_time_set = True time_stamp.extend((parameter_ranges[:-1, 0] - start_time) / 60.0) rate.extend((parameter_ranges[:-1, 3] - parameter_ranges[:-1, 2]) / (parameter_ranges[:-1, 1] - parameter_ranges[:-1, 0])) # d#Events / dt if time_line_absolute: plotting.plot_scatter_time(time_stamp, rate, title='Event rate [Hz]', marker_style='o', filename=output_pdf) else: plotting.plot_scatter(time_stamp, rate, title='Events per time', x_label='Progressed time [min.]', y_label='Events rate [Hz]', marker_style='o', filename=output_pdf) if output_file: with tb.open_file(output_file, mode="a") as out_file_h5: rec_array = np.array(zip(time_stamp, rate), dtype=[('time_stamp', float), ('rate', float)]).view(np.recarray) try: rate_table = out_file_h5.create_table(out_file_h5.root, name='Eventrate', description=rec_array, title='Event rate', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) rate_table[:] = rec_array except tb.exceptions.NodeError: logging.warning(output_file + ' has already a Eventrate note, do not overwrite existing.') return time_stamp, rate
[ "def", "analyze_event_rate", "(", "scan_base", ",", "combine_n_readouts", "=", "1000", ",", "time_line_absolute", "=", "True", ",", "output_pdf", "=", "None", ",", "output_file", "=", "None", ")", ":", "time_stamp", "=", "[", "]", "rate", "=", "[", "]", "start_time_set", "=", "False", "for", "data_file", "in", "scan_base", ":", "with", "tb", ".", "open_file", "(", "data_file", "+", "'_interpreted.h5'", ",", "mode", "=", "\"r\"", ")", "as", "in_file_h5", ":", "meta_data_array", "=", "in_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "parameter_ranges", "=", "np", ".", "column_stack", "(", "(", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'timestamp_start'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ",", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'event_number'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ")", ")", "if", "time_line_absolute", ":", "time_stamp", ".", "extend", "(", "parameter_ranges", "[", ":", "-", "1", ",", "0", "]", ")", "else", ":", "if", "not", "start_time_set", ":", "start_time", "=", "parameter_ranges", "[", "0", ",", "0", "]", "start_time_set", "=", "True", "time_stamp", ".", "extend", "(", "(", "parameter_ranges", "[", ":", "-", "1", ",", "0", "]", "-", "start_time", ")", "/", "60.0", ")", "rate", ".", "extend", "(", "(", "parameter_ranges", "[", ":", "-", "1", ",", "3", "]", "-", "parameter_ranges", "[", ":", "-", "1", ",", "2", "]", ")", "/", "(", "parameter_ranges", "[", ":", "-", "1", ",", "1", "]", "-", "parameter_ranges", "[", ":", "-", "1", ",", "0", "]", ")", ")", "# d#Events / dt", "if", "time_line_absolute", ":", "plotting", ".", "plot_scatter_time", "(", "time_stamp", ",", "rate", ",", "title", "=", "'Event rate [Hz]'", ",", "marker_style", "=", "'o'", ",", "filename", "=", "output_pdf", ")", "else", ":", "plotting", ".", "plot_scatter", "(", "time_stamp", ",", "rate", ",", "title", "=", "'Events per time'", ",", "x_label", "=", "'Progressed time [min.]'", ",", "y_label", "=", "'Events rate [Hz]'", ",", "marker_style", "=", "'o'", ",", "filename", "=", "output_pdf", ")", "if", "output_file", ":", "with", "tb", ".", "open_file", "(", "output_file", ",", "mode", "=", "\"a\"", ")", "as", "out_file_h5", ":", "rec_array", "=", "np", ".", "array", "(", "zip", "(", "time_stamp", ",", "rate", ")", ",", "dtype", "=", "[", "(", "'time_stamp'", ",", "float", ")", ",", "(", "'rate'", ",", "float", ")", "]", ")", ".", "view", "(", "np", ".", "recarray", ")", "try", ":", "rate_table", "=", "out_file_h5", ".", "create_table", "(", "out_file_h5", ".", "root", ",", "name", "=", "'Eventrate'", ",", "description", "=", "rec_array", ",", "title", "=", "'Event rate'", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "rate_table", "[", ":", "]", "=", "rec_array", "except", "tb", ".", "exceptions", ".", "NodeError", ":", "logging", ".", "warning", "(", "output_file", "+", "' has already a Eventrate note, do not overwrite existing.'", ")", "return", "time_stamp", ",", "rate" ]
Determines the number of events as a function of time. For this, the data of a fixed number of readouts is combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file. Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ]) combine_n_readouts: int the number of readouts to combine (e.g. 1000) time_line_absolute: bool if True the analysis uses absolute time stamps output_pdf: PdfPages PdfPages file object, if None the plot is printed to screen
[ "Determines", "the", "number", "of", "events", "as", "a", "function", "of", "time", ".", "Therefore", "the", "data", "of", "a", "fixed", "number", "of", "read", "outs", "are", "combined", "(", "combine_n_readouts", ")", ".", "The", "number", "of", "events", "is", "taken", "from", "the", "meta", "data", "info", "and", "stored", "into", "a", "pdf", "file", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L102-L147
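A minimal usage sketch for analyze_event_rate, analogous to the beam spot example above; the file names are hypothetical placeholders and the import path is inferred from the record URL.

from matplotlib.backends.backend_pdf import PdfPages
from pybar.analysis.analysis import analyze_event_rate  # module path inferred from the record URL

scan_bases = ['/data/SCC_50_fei4_self_trigger_scan_390']  # hypothetical scan base name

with PdfPages('event_rate.pdf') as output_pdf:
    # time_line_absolute=False plots the rate against the progressed time in minutes
    time_stamp, rate = analyze_event_rate(scan_base=scan_bases,
                                          combine_n_readouts=1000,
                                          time_line_absolute=False,
                                          output_pdf=output_pdf,
                                          output_file='event_rate.h5')  # also stores an 'Eventrate' table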
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
analyse_n_cluster_per_event
def analyse_n_cluster_per_event(scan_base, include_no_cluster=False, time_line_absolute=True, combine_n_readouts=1000, chunk_size=10000000, plot_n_cluster_hists=False, output_pdf=None, output_file=None): ''' Determines the number of cluster per event as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ] include_no_cluster: bool Set to true to also consider all events without any hit. combine_n_readouts: int the number of read outs to combine (e.g. 1000) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen ''' time_stamp = [] n_cluster = [] start_time_set = False for data_file in scan_base: with tb.open_file(data_file + '_interpreted.h5', mode="r+") as in_cluster_file_h5: # get data and data pointer meta_data_array = in_cluster_file_h5.root.meta_data[:] cluster_table = in_cluster_file_h5.root.Cluster # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number) parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts]))) # create a event_numer index (important for speed) analysis_utils.index_event_number(cluster_table) # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_tot_hist = False analyze_data.create_bcid_hist = False # variables for read speed up index = 0 # index where to start the read out, 0 at the beginning, increased during looping best_chunk_size = chunk_size total_cluster = cluster_table.shape[0] progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_cluster, term_width=80) progress_bar.start() # loop over the selected events for parameter_index, parameter_range in enumerate(parameter_ranges): logging.debug('Analyze time stamp ' + str(parameter_range[0]) + ' and data from events = [' + str(parameter_range[2]) + ',' + str(parameter_range[3]) + '[ ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%') analyze_data.reset() # resets the data of the last analysis # loop over the cluster in the actual selected events with optimizations: determine best chunk size, start word index given readout_cluster_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up hist = None for clusters, index in analysis_utils.data_aligned_at_events(cluster_table, start_event_number=parameter_range[2], stop_event_number=parameter_range[3], start_index=index, chunk_size=best_chunk_size): n_cluster_per_event = analysis_utils.get_n_cluster_in_events(clusters['event_number'])[:, 1] # array with the number of cluster per event, cluster per event are at least 1 if hist is None: hist = np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0] else: hist = np.add(hist, np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0]) if include_no_cluster and parameter_range[3] is not None: # happend for the last readout hist[0] = (parameter_range[3] - parameter_range[2]) - len(n_cluster_per_event) # add the events without any cluster readout_cluster_len += 
clusters.shape[0] total_cluster -= len(clusters) progress_bar.update(index) best_chunk_size = int(1.5 * readout_cluster_len) if int(1.05 * readout_cluster_len) < chunk_size else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction if plot_n_cluster_hists: plotting.plot_1d_hist(hist, title='Number of cluster per event at ' + str(parameter_range[0]), x_axis_title='Number of cluster', y_axis_title='#', log_y=True, filename=output_pdf) hist = hist.astype('f4') / np.sum(hist) # calculate fraction from total numbers if time_line_absolute: time_stamp.append(parameter_range[0]) else: if not start_time_set: start_time = parameter_ranges[0, 0] start_time_set = True time_stamp.append((parameter_range[0] - start_time) / 60.0) n_cluster.append(hist) progress_bar.finish() if total_cluster != 0: logging.warning('Not all clusters were selected during analysis. Analysis is therefore not exact') if time_line_absolute: plotting.plot_scatter_time(time_stamp, n_cluster, title='Number of cluster per event as a function of time', marker_style='o', filename=output_pdf, legend=('0 cluster', '1 cluster', '2 cluster', '3 cluster') if include_no_cluster else ('0 cluster not plotted', '1 cluster', '2 cluster', '3 cluster')) else: plotting.plot_scatter(time_stamp, n_cluster, title='Number of cluster per event as a function of time', x_label='time [min.]', marker_style='o', filename=output_pdf, legend=('0 cluster', '1 cluster', '2 cluster', '3 cluster') if include_no_cluster else ('0 cluster not plotted', '1 cluster', '2 cluster', '3 cluster')) if output_file: with tb.open_file(output_file, mode="a") as out_file_h5: cluster_array = np.array(n_cluster) rec_array = np.array(zip(time_stamp, cluster_array[:, 0], cluster_array[:, 1], cluster_array[:, 2], cluster_array[:, 3], cluster_array[:, 4], cluster_array[:, 5]), dtype=[('time_stamp', float), ('cluster_0', float), ('cluster_1', float), ('cluster_2', float), ('cluster_3', float), ('cluster_4', float), ('cluster_5', float)]).view(np.recarray) try: n_cluster_table = out_file_h5.create_table(out_file_h5.root, name='n_cluster', description=rec_array, title='Cluster per event', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) n_cluster_table[:] = rec_array except tb.exceptions.NodeError: logging.warning(output_file + ' has already a Beamspot note, do not overwrite existing.') return time_stamp, n_cluster
python
def analyse_n_cluster_per_event(scan_base, include_no_cluster=False, time_line_absolute=True, combine_n_readouts=1000, chunk_size=10000000, plot_n_cluster_hists=False, output_pdf=None, output_file=None): ''' Determines the number of cluster per event as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ] include_no_cluster: bool Set to true to also consider all events without any hit. combine_n_readouts: int the number of read outs to combine (e.g. 1000) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen ''' time_stamp = [] n_cluster = [] start_time_set = False for data_file in scan_base: with tb.open_file(data_file + '_interpreted.h5', mode="r+") as in_cluster_file_h5: # get data and data pointer meta_data_array = in_cluster_file_h5.root.meta_data[:] cluster_table = in_cluster_file_h5.root.Cluster # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number) parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts]))) # create a event_numer index (important for speed) analysis_utils.index_event_number(cluster_table) # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_tot_hist = False analyze_data.create_bcid_hist = False # variables for read speed up index = 0 # index where to start the read out, 0 at the beginning, increased during looping best_chunk_size = chunk_size total_cluster = cluster_table.shape[0] progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_cluster, term_width=80) progress_bar.start() # loop over the selected events for parameter_index, parameter_range in enumerate(parameter_ranges): logging.debug('Analyze time stamp ' + str(parameter_range[0]) + ' and data from events = [' + str(parameter_range[2]) + ',' + str(parameter_range[3]) + '[ ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%') analyze_data.reset() # resets the data of the last analysis # loop over the cluster in the actual selected events with optimizations: determine best chunk size, start word index given readout_cluster_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up hist = None for clusters, index in analysis_utils.data_aligned_at_events(cluster_table, start_event_number=parameter_range[2], stop_event_number=parameter_range[3], start_index=index, chunk_size=best_chunk_size): n_cluster_per_event = analysis_utils.get_n_cluster_in_events(clusters['event_number'])[:, 1] # array with the number of cluster per event, cluster per event are at least 1 if hist is None: hist = np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0] else: hist = np.add(hist, np.histogram(n_cluster_per_event, bins=10, range=(0, 10))[0]) if include_no_cluster and parameter_range[3] is not None: # happend for the last readout hist[0] = (parameter_range[3] - parameter_range[2]) - len(n_cluster_per_event) # add the events without any cluster readout_cluster_len += 
clusters.shape[0] total_cluster -= len(clusters) progress_bar.update(index) best_chunk_size = int(1.5 * readout_cluster_len) if int(1.05 * readout_cluster_len) < chunk_size else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction if plot_n_cluster_hists: plotting.plot_1d_hist(hist, title='Number of cluster per event at ' + str(parameter_range[0]), x_axis_title='Number of cluster', y_axis_title='#', log_y=True, filename=output_pdf) hist = hist.astype('f4') / np.sum(hist) # calculate fraction from total numbers if time_line_absolute: time_stamp.append(parameter_range[0]) else: if not start_time_set: start_time = parameter_ranges[0, 0] start_time_set = True time_stamp.append((parameter_range[0] - start_time) / 60.0) n_cluster.append(hist) progress_bar.finish() if total_cluster != 0: logging.warning('Not all clusters were selected during analysis. Analysis is therefore not exact') if time_line_absolute: plotting.plot_scatter_time(time_stamp, n_cluster, title='Number of cluster per event as a function of time', marker_style='o', filename=output_pdf, legend=('0 cluster', '1 cluster', '2 cluster', '3 cluster') if include_no_cluster else ('0 cluster not plotted', '1 cluster', '2 cluster', '3 cluster')) else: plotting.plot_scatter(time_stamp, n_cluster, title='Number of cluster per event as a function of time', x_label='time [min.]', marker_style='o', filename=output_pdf, legend=('0 cluster', '1 cluster', '2 cluster', '3 cluster') if include_no_cluster else ('0 cluster not plotted', '1 cluster', '2 cluster', '3 cluster')) if output_file: with tb.open_file(output_file, mode="a") as out_file_h5: cluster_array = np.array(n_cluster) rec_array = np.array(zip(time_stamp, cluster_array[:, 0], cluster_array[:, 1], cluster_array[:, 2], cluster_array[:, 3], cluster_array[:, 4], cluster_array[:, 5]), dtype=[('time_stamp', float), ('cluster_0', float), ('cluster_1', float), ('cluster_2', float), ('cluster_3', float), ('cluster_4', float), ('cluster_5', float)]).view(np.recarray) try: n_cluster_table = out_file_h5.create_table(out_file_h5.root, name='n_cluster', description=rec_array, title='Cluster per event', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) n_cluster_table[:] = rec_array except tb.exceptions.NodeError: logging.warning(output_file + ' has already a Beamspot note, do not overwrite existing.') return time_stamp, n_cluster
[ "def", "analyse_n_cluster_per_event", "(", "scan_base", ",", "include_no_cluster", "=", "False", ",", "time_line_absolute", "=", "True", ",", "combine_n_readouts", "=", "1000", ",", "chunk_size", "=", "10000000", ",", "plot_n_cluster_hists", "=", "False", ",", "output_pdf", "=", "None", ",", "output_file", "=", "None", ")", ":", "time_stamp", "=", "[", "]", "n_cluster", "=", "[", "]", "start_time_set", "=", "False", "for", "data_file", "in", "scan_base", ":", "with", "tb", ".", "open_file", "(", "data_file", "+", "'_interpreted.h5'", ",", "mode", "=", "\"r+\"", ")", "as", "in_cluster_file_h5", ":", "# get data and data pointer", "meta_data_array", "=", "in_cluster_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "cluster_table", "=", "in_cluster_file_h5", ".", "root", ".", "Cluster", "# determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)", "parameter_ranges", "=", "np", ".", "column_stack", "(", "(", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'timestamp_start'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ",", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'event_number'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ")", ")", "# create a event_numer index (important for speed)", "analysis_utils", ".", "index_event_number", "(", "cluster_table", ")", "# initialize the analysis and set settings", "analyze_data", "=", "AnalyzeRawData", "(", ")", "analyze_data", ".", "create_tot_hist", "=", "False", "analyze_data", ".", "create_bcid_hist", "=", "False", "# variables for read speed up", "index", "=", "0", "# index where to start the read out, 0 at the beginning, increased during looping", "best_chunk_size", "=", "chunk_size", "total_cluster", "=", "cluster_table", ".", "shape", "[", "0", "]", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "total_cluster", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "# loop over the selected events", "for", "parameter_index", ",", "parameter_range", "in", "enumerate", "(", "parameter_ranges", ")", ":", "logging", ".", "debug", "(", "'Analyze time stamp '", "+", "str", "(", "parameter_range", "[", "0", "]", ")", "+", "' and data from events = ['", "+", "str", "(", "parameter_range", "[", "2", "]", ")", "+", "','", "+", "str", "(", "parameter_range", "[", "3", "]", ")", "+", "'[ '", "+", "str", "(", "int", "(", "float", "(", "float", "(", "parameter_index", ")", "/", "float", "(", "len", "(", "parameter_ranges", ")", ")", "*", "100.0", ")", ")", ")", "+", "'%'", ")", "analyze_data", ".", "reset", "(", ")", "# resets the data of the last analysis", "# loop over the cluster in the actual selected events with optimizations: determine best chunk size, start word index given", "readout_cluster_len", "=", "0", "# variable to calculate a optimal chunk size value from the number of hits for speed up", "hist", "=", "None", "for", "clusters", ",", "index", "in", "analysis_utils", ".", "data_aligned_at_events", "(", "cluster_table", ",", "start_event_number", "=", "parameter_range", "[", "2", "]", ",", "stop_event_number", "=", "parameter_range", "[", "3", "]", ",", "start_index", "=", "index", ",", "chunk_size", 
"=", "best_chunk_size", ")", ":", "n_cluster_per_event", "=", "analysis_utils", ".", "get_n_cluster_in_events", "(", "clusters", "[", "'event_number'", "]", ")", "[", ":", ",", "1", "]", "# array with the number of cluster per event, cluster per event are at least 1", "if", "hist", "is", "None", ":", "hist", "=", "np", ".", "histogram", "(", "n_cluster_per_event", ",", "bins", "=", "10", ",", "range", "=", "(", "0", ",", "10", ")", ")", "[", "0", "]", "else", ":", "hist", "=", "np", ".", "add", "(", "hist", ",", "np", ".", "histogram", "(", "n_cluster_per_event", ",", "bins", "=", "10", ",", "range", "=", "(", "0", ",", "10", ")", ")", "[", "0", "]", ")", "if", "include_no_cluster", "and", "parameter_range", "[", "3", "]", "is", "not", "None", ":", "# happend for the last readout", "hist", "[", "0", "]", "=", "(", "parameter_range", "[", "3", "]", "-", "parameter_range", "[", "2", "]", ")", "-", "len", "(", "n_cluster_per_event", ")", "# add the events without any cluster", "readout_cluster_len", "+=", "clusters", ".", "shape", "[", "0", "]", "total_cluster", "-=", "len", "(", "clusters", ")", "progress_bar", ".", "update", "(", "index", ")", "best_chunk_size", "=", "int", "(", "1.5", "*", "readout_cluster_len", ")", "if", "int", "(", "1.05", "*", "readout_cluster_len", ")", "<", "chunk_size", "else", "chunk_size", "# to increase the readout speed, estimated the number of hits for one read instruction", "if", "plot_n_cluster_hists", ":", "plotting", ".", "plot_1d_hist", "(", "hist", ",", "title", "=", "'Number of cluster per event at '", "+", "str", "(", "parameter_range", "[", "0", "]", ")", ",", "x_axis_title", "=", "'Number of cluster'", ",", "y_axis_title", "=", "'#'", ",", "log_y", "=", "True", ",", "filename", "=", "output_pdf", ")", "hist", "=", "hist", ".", "astype", "(", "'f4'", ")", "/", "np", ".", "sum", "(", "hist", ")", "# calculate fraction from total numbers", "if", "time_line_absolute", ":", "time_stamp", ".", "append", "(", "parameter_range", "[", "0", "]", ")", "else", ":", "if", "not", "start_time_set", ":", "start_time", "=", "parameter_ranges", "[", "0", ",", "0", "]", "start_time_set", "=", "True", "time_stamp", ".", "append", "(", "(", "parameter_range", "[", "0", "]", "-", "start_time", ")", "/", "60.0", ")", "n_cluster", ".", "append", "(", "hist", ")", "progress_bar", ".", "finish", "(", ")", "if", "total_cluster", "!=", "0", ":", "logging", ".", "warning", "(", "'Not all clusters were selected during analysis. 
Analysis is therefore not exact'", ")", "if", "time_line_absolute", ":", "plotting", ".", "plot_scatter_time", "(", "time_stamp", ",", "n_cluster", ",", "title", "=", "'Number of cluster per event as a function of time'", ",", "marker_style", "=", "'o'", ",", "filename", "=", "output_pdf", ",", "legend", "=", "(", "'0 cluster'", ",", "'1 cluster'", ",", "'2 cluster'", ",", "'3 cluster'", ")", "if", "include_no_cluster", "else", "(", "'0 cluster not plotted'", ",", "'1 cluster'", ",", "'2 cluster'", ",", "'3 cluster'", ")", ")", "else", ":", "plotting", ".", "plot_scatter", "(", "time_stamp", ",", "n_cluster", ",", "title", "=", "'Number of cluster per event as a function of time'", ",", "x_label", "=", "'time [min.]'", ",", "marker_style", "=", "'o'", ",", "filename", "=", "output_pdf", ",", "legend", "=", "(", "'0 cluster'", ",", "'1 cluster'", ",", "'2 cluster'", ",", "'3 cluster'", ")", "if", "include_no_cluster", "else", "(", "'0 cluster not plotted'", ",", "'1 cluster'", ",", "'2 cluster'", ",", "'3 cluster'", ")", ")", "if", "output_file", ":", "with", "tb", ".", "open_file", "(", "output_file", ",", "mode", "=", "\"a\"", ")", "as", "out_file_h5", ":", "cluster_array", "=", "np", ".", "array", "(", "n_cluster", ")", "rec_array", "=", "np", ".", "array", "(", "zip", "(", "time_stamp", ",", "cluster_array", "[", ":", ",", "0", "]", ",", "cluster_array", "[", ":", ",", "1", "]", ",", "cluster_array", "[", ":", ",", "2", "]", ",", "cluster_array", "[", ":", ",", "3", "]", ",", "cluster_array", "[", ":", ",", "4", "]", ",", "cluster_array", "[", ":", ",", "5", "]", ")", ",", "dtype", "=", "[", "(", "'time_stamp'", ",", "float", ")", ",", "(", "'cluster_0'", ",", "float", ")", ",", "(", "'cluster_1'", ",", "float", ")", ",", "(", "'cluster_2'", ",", "float", ")", ",", "(", "'cluster_3'", ",", "float", ")", ",", "(", "'cluster_4'", ",", "float", ")", ",", "(", "'cluster_5'", ",", "float", ")", "]", ")", ".", "view", "(", "np", ".", "recarray", ")", "try", ":", "n_cluster_table", "=", "out_file_h5", ".", "create_table", "(", "out_file_h5", ".", "root", ",", "name", "=", "'n_cluster'", ",", "description", "=", "rec_array", ",", "title", "=", "'Cluster per event'", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "n_cluster_table", "[", ":", "]", "=", "rec_array", "except", "tb", ".", "exceptions", ".", "NodeError", ":", "logging", ".", "warning", "(", "output_file", "+", "' has already a Beamspot note, do not overwrite existing.'", ")", "return", "time_stamp", ",", "n_cluster" ]
Determines the number of clusters per event as a function of time. For this, the data of a fixed number of readouts is combined ('combine_n_readouts'). Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ]) include_no_cluster: bool Set to True to also consider all events without any hit. combine_n_readouts: int the number of readouts to combine (e.g. 1000) chunk_size: int the maximum chunk size used during read, if too big a memory error occurs, if too small the analysis takes longer output_pdf: PdfPages PdfPages file object, if None the plot is printed to screen
[ "Determines", "the", "number", "of", "cluster", "per", "event", "as", "a", "function", "of", "time", ".", "Therefore", "the", "data", "of", "a", "fixed", "number", "of", "read", "outs", "are", "combined", "(", "combine_n_readouts", ")", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L150-L248
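A minimal usage sketch for analyse_n_cluster_per_event under the same assumptions (import path inferred from the record URL, hypothetical file names, an existing '<scan_base>_interpreted.h5' file with a 'Cluster' table).

from matplotlib.backends.backend_pdf import PdfPages
from pybar.analysis.analysis import analyse_n_cluster_per_event  # module path inferred from the record URL

scan_bases = ['/data/SCC_50_fei4_self_trigger_scan_390']  # hypothetical scan base name

with PdfPages('n_cluster.pdf') as output_pdf:
    time_stamp, n_cluster = analyse_n_cluster_per_event(scan_base=scan_bases,
                                                        include_no_cluster=True,  # also count events without any cluster
                                                        time_line_absolute=False,
                                                        combine_n_readouts=1000,
                                                        output_pdf=output_pdf,
                                                        output_file='n_cluster.h5')
# each entry of n_cluster is a normalized histogram of the number of clusters per event for one time slice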
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
select_hits_from_cluster_info
def select_hits_from_cluster_info(input_file_hits, output_file_hits, cluster_size_condition, n_cluster_condition, chunk_size=4000000): ''' Takes a hit table and stores only selected hits into a new table. The selection is done on an event base and events are selected if they have a certain number of cluster or cluster size. To increase the analysis speed a event index for the input hit file is created first. Since a cluster hit table can be created to this way of hit selection is not needed anymore. Parameters ---------- input_file_hits: str the input file name with hits output_file_hits: str the output file name for the hits cluster_size_condition: str the cluster size condition to select events (e.g.: 'cluster_size_condition <= 2') n_cluster_condition: str the number of cluster in a event ((e.g.: 'n_cluster_condition == 1') ''' logging.info('Write hits of events from ' + str(input_file_hits) + ' with ' + cluster_size_condition + ' and ' + n_cluster_condition + ' into ' + str(output_file_hits)) with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: analysis_utils.index_event_number(in_hit_file_h5.root.Hits) analysis_utils.index_event_number(in_hit_file_h5.root.Cluster) with tb.open_file(output_file_hits, mode="w") as out_hit_file_h5: hit_table_out = out_hit_file_h5.create_table(out_hit_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) cluster_table = in_hit_file_h5.root.Cluster last_word_number = 0 progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_table.shape[0], term_width=80) progress_bar.start() for data, index in analysis_utils.data_aligned_at_events(cluster_table, chunk_size=chunk_size): selected_events_1 = analysis_utils.get_events_with_cluster_size(event_number=data['event_number'], cluster_size=data['size'], condition=cluster_size_condition) # select the events with clusters of a certain size selected_events_2 = analysis_utils.get_events_with_n_cluster(event_number=data['event_number'], condition=n_cluster_condition) # select the events with a certain cluster number selected_events = analysis_utils.get_events_in_both_arrays(selected_events_1, selected_events_2) # select events with both conditions above logging.debug('Selected ' + str(len(selected_events)) + ' events with ' + n_cluster_condition + ' and ' + cluster_size_condition) last_word_number = analysis_utils.write_hits_in_events(hit_table_in=in_hit_file_h5.root.Hits, hit_table_out=hit_table_out, events=selected_events, start_hit_word=last_word_number) # write the hits of the selected events into a new table progress_bar.update(index) progress_bar.finish() in_hit_file_h5.root.meta_data.copy(out_hit_file_h5.root)
python
def select_hits_from_cluster_info(input_file_hits, output_file_hits, cluster_size_condition, n_cluster_condition, chunk_size=4000000): ''' Takes a hit table and stores only selected hits into a new table. The selection is done on an event base and events are selected if they have a certain number of cluster or cluster size. To increase the analysis speed a event index for the input hit file is created first. Since a cluster hit table can be created to this way of hit selection is not needed anymore. Parameters ---------- input_file_hits: str the input file name with hits output_file_hits: str the output file name for the hits cluster_size_condition: str the cluster size condition to select events (e.g.: 'cluster_size_condition <= 2') n_cluster_condition: str the number of cluster in a event ((e.g.: 'n_cluster_condition == 1') ''' logging.info('Write hits of events from ' + str(input_file_hits) + ' with ' + cluster_size_condition + ' and ' + n_cluster_condition + ' into ' + str(output_file_hits)) with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: analysis_utils.index_event_number(in_hit_file_h5.root.Hits) analysis_utils.index_event_number(in_hit_file_h5.root.Cluster) with tb.open_file(output_file_hits, mode="w") as out_hit_file_h5: hit_table_out = out_hit_file_h5.create_table(out_hit_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) cluster_table = in_hit_file_h5.root.Cluster last_word_number = 0 progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_table.shape[0], term_width=80) progress_bar.start() for data, index in analysis_utils.data_aligned_at_events(cluster_table, chunk_size=chunk_size): selected_events_1 = analysis_utils.get_events_with_cluster_size(event_number=data['event_number'], cluster_size=data['size'], condition=cluster_size_condition) # select the events with clusters of a certain size selected_events_2 = analysis_utils.get_events_with_n_cluster(event_number=data['event_number'], condition=n_cluster_condition) # select the events with a certain cluster number selected_events = analysis_utils.get_events_in_both_arrays(selected_events_1, selected_events_2) # select events with both conditions above logging.debug('Selected ' + str(len(selected_events)) + ' events with ' + n_cluster_condition + ' and ' + cluster_size_condition) last_word_number = analysis_utils.write_hits_in_events(hit_table_in=in_hit_file_h5.root.Hits, hit_table_out=hit_table_out, events=selected_events, start_hit_word=last_word_number) # write the hits of the selected events into a new table progress_bar.update(index) progress_bar.finish() in_hit_file_h5.root.meta_data.copy(out_hit_file_h5.root)
[ "def", "select_hits_from_cluster_info", "(", "input_file_hits", ",", "output_file_hits", ",", "cluster_size_condition", ",", "n_cluster_condition", ",", "chunk_size", "=", "4000000", ")", ":", "logging", ".", "info", "(", "'Write hits of events from '", "+", "str", "(", "input_file_hits", ")", "+", "' with '", "+", "cluster_size_condition", "+", "' and '", "+", "n_cluster_condition", "+", "' into '", "+", "str", "(", "output_file_hits", ")", ")", "with", "tb", ".", "open_file", "(", "input_file_hits", ",", "mode", "=", "\"r+\"", ")", "as", "in_hit_file_h5", ":", "analysis_utils", ".", "index_event_number", "(", "in_hit_file_h5", ".", "root", ".", "Hits", ")", "analysis_utils", ".", "index_event_number", "(", "in_hit_file_h5", ".", "root", ".", "Cluster", ")", "with", "tb", ".", "open_file", "(", "output_file_hits", ",", "mode", "=", "\"w\"", ")", "as", "out_hit_file_h5", ":", "hit_table_out", "=", "out_hit_file_h5", ".", "create_table", "(", "out_hit_file_h5", ".", "root", ",", "name", "=", "'Hits'", ",", "description", "=", "data_struct", ".", "HitInfoTable", ",", "title", "=", "'hit_data'", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "cluster_table", "=", "in_hit_file_h5", ".", "root", ".", "Cluster", "last_word_number", "=", "0", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "cluster_table", ".", "shape", "[", "0", "]", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "for", "data", ",", "index", "in", "analysis_utils", ".", "data_aligned_at_events", "(", "cluster_table", ",", "chunk_size", "=", "chunk_size", ")", ":", "selected_events_1", "=", "analysis_utils", ".", "get_events_with_cluster_size", "(", "event_number", "=", "data", "[", "'event_number'", "]", ",", "cluster_size", "=", "data", "[", "'size'", "]", ",", "condition", "=", "cluster_size_condition", ")", "# select the events with clusters of a certain size", "selected_events_2", "=", "analysis_utils", ".", "get_events_with_n_cluster", "(", "event_number", "=", "data", "[", "'event_number'", "]", ",", "condition", "=", "n_cluster_condition", ")", "# select the events with a certain cluster number", "selected_events", "=", "analysis_utils", ".", "get_events_in_both_arrays", "(", "selected_events_1", ",", "selected_events_2", ")", "# select events with both conditions above", "logging", ".", "debug", "(", "'Selected '", "+", "str", "(", "len", "(", "selected_events", ")", ")", "+", "' events with '", "+", "n_cluster_condition", "+", "' and '", "+", "cluster_size_condition", ")", "last_word_number", "=", "analysis_utils", ".", "write_hits_in_events", "(", "hit_table_in", "=", "in_hit_file_h5", ".", "root", ".", "Hits", ",", "hit_table_out", "=", "hit_table_out", ",", "events", "=", "selected_events", ",", "start_hit_word", "=", "last_word_number", ")", "# write the hits of the selected events into a new table", "progress_bar", ".", "update", "(", "index", ")", "progress_bar", ".", "finish", "(", ")", "in_hit_file_h5", ".", "root", ".", "meta_data", ".", "copy", "(", "out_hit_file_h5", ".", "root", ")" ]
Takes a hit table and stores only selected hits into a new table. The selection is done on an event basis: events are selected if they have a certain number of clusters or a certain cluster size. To increase the analysis speed, an event index for the input hit file is created first. Since a cluster hit table can be created, this way of hit selection is not needed anymore. Parameters ---------- input_file_hits: str the input file name with hits output_file_hits: str the output file name for the hits cluster_size_condition: str the cluster size condition to select events (e.g.: 'cluster_size_condition <= 2') n_cluster_condition: str the condition on the number of clusters in an event (e.g.: 'n_cluster_condition == 1')
[ "Takes", "a", "hit", "table", "and", "stores", "only", "selected", "hits", "into", "a", "new", "table", ".", "The", "selection", "is", "done", "on", "an", "event", "base", "and", "events", "are", "selected", "if", "they", "have", "a", "certain", "number", "of", "cluster", "or", "cluster", "size", ".", "To", "increase", "the", "analysis", "speed", "a", "event", "index", "for", "the", "input", "hit", "file", "is", "created", "first", ".", "Since", "a", "cluster", "hit", "table", "can", "be", "created", "to", "this", "way", "of", "hit", "selection", "is", "not", "needed", "anymore", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L251-L285
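A minimal usage sketch for select_hits_from_cluster_info; the input file must already contain 'Hits' and 'Cluster' tables, all file names are hypothetical, and the condition strings follow the examples given in the docstring.

from pybar.analysis.analysis import select_hits_from_cluster_info  # module path inferred from the record URL

select_hits_from_cluster_info(input_file_hits='/data/scan_390_interpreted.h5',      # hypothetical input with Hits and Cluster tables
                              output_file_hits='/data/scan_390_selected_hits.h5',   # hypothetical output file
                              cluster_size_condition='cluster_size_condition <= 2',
                              n_cluster_condition='n_cluster_condition == 1')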
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
select_hits
def select_hits(input_file_hits, output_file_hits, condition=None, cluster_size_condition=None, n_cluster_condition=None, chunk_size=5000000): ''' Takes a hit table and stores only selected hits into a new table. The selection of hits is done with a numexp string. Only if this expression evaluates to true the hit is taken. One can also select hits from cluster conditions. This selection is done on an event basis, meaning events are selected where the cluster condition is true and then hits of these events are taken. Parameters ---------- input_file_hits: str the input file name with hits output_file_hits: str the output file name for the hits condition: str Numexpr string to select hits (e.g.: '(relative_BCID == 6) & (column == row)') All hit infos can be used (column, row, ...) cluster_size_condition: int Hit of events with the given cluster size are selected. n_cluster_condition: int Hit of events with the given cluster number are selected. ''' logging.info('Write hits with ' + condition + ' into ' + str(output_file_hits)) if cluster_size_condition is None and n_cluster_condition is None: # no cluster cuts are done with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: analysis_utils.index_event_number(in_hit_file_h5.root.Hits) # create event index for faster selection with tb.open_file(output_file_hits, mode="w") as out_hit_file_h5: hit_table_out = out_hit_file_h5.create_table(out_hit_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) analysis_utils.write_hits_in_event_range(hit_table_in=in_hit_file_h5.root.Hits, hit_table_out=hit_table_out, condition=condition) # write the hits of the selected events into a new table in_hit_file_h5.root.meta_data.copy(out_hit_file_h5.root) # copy meta_data note to new file else: with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: # open file with hit/cluster data with r+ to be able to create index analysis_utils.index_event_number(in_hit_file_h5.root.Hits) # create event index for faster selection analysis_utils.index_event_number(in_hit_file_h5.root.Cluster) # create event index for faster selection with tb.open_file(output_file_hits, mode="w") as out_hit_file_h5: hit_table_out = out_hit_file_h5.create_table(out_hit_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) cluster_table = in_hit_file_h5.root.Cluster last_word_number = 0 progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_table.shape[0], term_width=80) progress_bar.start() for data, index in analysis_utils.data_aligned_at_events(cluster_table, chunk_size=chunk_size): if cluster_size_condition is not None: selected_events = analysis_utils.get_events_with_cluster_size(event_number=data['event_number'], cluster_size=data['size'], condition='cluster_size == ' + str(cluster_size_condition)) # select the events with only 1 hit cluster if n_cluster_condition is not None: selected_events_2 = analysis_utils.get_events_with_n_cluster(event_number=data['event_number'], condition='n_cluster == ' + str(n_cluster_condition)) # select the events with only 1 cluster selected_events = selected_events[analysis_utils.in1d_events(selected_events, selected_events_2)] # select events with the first two conditions above elif n_cluster_condition is not None: 
selected_events = analysis_utils.get_events_with_n_cluster(event_number=data['event_number'], condition='n_cluster == ' + str(n_cluster_condition)) else: raise RuntimeError('Cannot understand cluster selection criterion') last_word_number = analysis_utils.write_hits_in_events(hit_table_in=in_hit_file_h5.root.Hits, hit_table_out=hit_table_out, events=selected_events, start_hit_word=last_word_number, condition=condition, chunk_size=chunk_size) # write the hits of the selected events into a new table progress_bar.update(index) progress_bar.finish() in_hit_file_h5.root.meta_data.copy(out_hit_file_h5.root)
python
def select_hits(input_file_hits, output_file_hits, condition=None, cluster_size_condition=None, n_cluster_condition=None, chunk_size=5000000): ''' Takes a hit table and stores only selected hits into a new table. The selection of hits is done with a numexp string. Only if this expression evaluates to true the hit is taken. One can also select hits from cluster conditions. This selection is done on an event basis, meaning events are selected where the cluster condition is true and then hits of these events are taken. Parameters ---------- input_file_hits: str the input file name with hits output_file_hits: str the output file name for the hits condition: str Numexpr string to select hits (e.g.: '(relative_BCID == 6) & (column == row)') All hit infos can be used (column, row, ...) cluster_size_condition: int Hit of events with the given cluster size are selected. n_cluster_condition: int Hit of events with the given cluster number are selected. ''' logging.info('Write hits with ' + condition + ' into ' + str(output_file_hits)) if cluster_size_condition is None and n_cluster_condition is None: # no cluster cuts are done with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: analysis_utils.index_event_number(in_hit_file_h5.root.Hits) # create event index for faster selection with tb.open_file(output_file_hits, mode="w") as out_hit_file_h5: hit_table_out = out_hit_file_h5.create_table(out_hit_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) analysis_utils.write_hits_in_event_range(hit_table_in=in_hit_file_h5.root.Hits, hit_table_out=hit_table_out, condition=condition) # write the hits of the selected events into a new table in_hit_file_h5.root.meta_data.copy(out_hit_file_h5.root) # copy meta_data note to new file else: with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: # open file with hit/cluster data with r+ to be able to create index analysis_utils.index_event_number(in_hit_file_h5.root.Hits) # create event index for faster selection analysis_utils.index_event_number(in_hit_file_h5.root.Cluster) # create event index for faster selection with tb.open_file(output_file_hits, mode="w") as out_hit_file_h5: hit_table_out = out_hit_file_h5.create_table(out_hit_file_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) cluster_table = in_hit_file_h5.root.Cluster last_word_number = 0 progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_table.shape[0], term_width=80) progress_bar.start() for data, index in analysis_utils.data_aligned_at_events(cluster_table, chunk_size=chunk_size): if cluster_size_condition is not None: selected_events = analysis_utils.get_events_with_cluster_size(event_number=data['event_number'], cluster_size=data['size'], condition='cluster_size == ' + str(cluster_size_condition)) # select the events with only 1 hit cluster if n_cluster_condition is not None: selected_events_2 = analysis_utils.get_events_with_n_cluster(event_number=data['event_number'], condition='n_cluster == ' + str(n_cluster_condition)) # select the events with only 1 cluster selected_events = selected_events[analysis_utils.in1d_events(selected_events, selected_events_2)] # select events with the first two conditions above elif n_cluster_condition is not None: 
selected_events = analysis_utils.get_events_with_n_cluster(event_number=data['event_number'], condition='n_cluster == ' + str(n_cluster_condition)) else: raise RuntimeError('Cannot understand cluster selection criterion') last_word_number = analysis_utils.write_hits_in_events(hit_table_in=in_hit_file_h5.root.Hits, hit_table_out=hit_table_out, events=selected_events, start_hit_word=last_word_number, condition=condition, chunk_size=chunk_size) # write the hits of the selected events into a new table progress_bar.update(index) progress_bar.finish() in_hit_file_h5.root.meta_data.copy(out_hit_file_h5.root)
[ "def", "select_hits", "(", "input_file_hits", ",", "output_file_hits", ",", "condition", "=", "None", ",", "cluster_size_condition", "=", "None", ",", "n_cluster_condition", "=", "None", ",", "chunk_size", "=", "5000000", ")", ":", "logging", ".", "info", "(", "'Write hits with '", "+", "condition", "+", "' into '", "+", "str", "(", "output_file_hits", ")", ")", "if", "cluster_size_condition", "is", "None", "and", "n_cluster_condition", "is", "None", ":", "# no cluster cuts are done", "with", "tb", ".", "open_file", "(", "input_file_hits", ",", "mode", "=", "\"r+\"", ")", "as", "in_hit_file_h5", ":", "analysis_utils", ".", "index_event_number", "(", "in_hit_file_h5", ".", "root", ".", "Hits", ")", "# create event index for faster selection", "with", "tb", ".", "open_file", "(", "output_file_hits", ",", "mode", "=", "\"w\"", ")", "as", "out_hit_file_h5", ":", "hit_table_out", "=", "out_hit_file_h5", ".", "create_table", "(", "out_hit_file_h5", ".", "root", ",", "name", "=", "'Hits'", ",", "description", "=", "data_struct", ".", "HitInfoTable", ",", "title", "=", "'hit_data'", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "analysis_utils", ".", "write_hits_in_event_range", "(", "hit_table_in", "=", "in_hit_file_h5", ".", "root", ".", "Hits", ",", "hit_table_out", "=", "hit_table_out", ",", "condition", "=", "condition", ")", "# write the hits of the selected events into a new table", "in_hit_file_h5", ".", "root", ".", "meta_data", ".", "copy", "(", "out_hit_file_h5", ".", "root", ")", "# copy meta_data note to new file", "else", ":", "with", "tb", ".", "open_file", "(", "input_file_hits", ",", "mode", "=", "\"r+\"", ")", "as", "in_hit_file_h5", ":", "# open file with hit/cluster data with r+ to be able to create index", "analysis_utils", ".", "index_event_number", "(", "in_hit_file_h5", ".", "root", ".", "Hits", ")", "# create event index for faster selection", "analysis_utils", ".", "index_event_number", "(", "in_hit_file_h5", ".", "root", ".", "Cluster", ")", "# create event index for faster selection", "with", "tb", ".", "open_file", "(", "output_file_hits", ",", "mode", "=", "\"w\"", ")", "as", "out_hit_file_h5", ":", "hit_table_out", "=", "out_hit_file_h5", ".", "create_table", "(", "out_hit_file_h5", ".", "root", ",", "name", "=", "'Hits'", ",", "description", "=", "data_struct", ".", "HitInfoTable", ",", "title", "=", "'hit_data'", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "cluster_table", "=", "in_hit_file_h5", ".", "root", ".", "Cluster", "last_word_number", "=", "0", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "cluster_table", ".", "shape", "[", "0", "]", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "for", "data", ",", "index", "in", "analysis_utils", ".", "data_aligned_at_events", "(", "cluster_table", ",", "chunk_size", "=", "chunk_size", ")", ":", "if", "cluster_size_condition", "is", "not", "None", ":", "selected_events", "=", "analysis_utils", ".", "get_events_with_cluster_size", "(", "event_number", "=", "data", "[", "'event_number'", "]", 
",", "cluster_size", "=", "data", "[", "'size'", "]", ",", "condition", "=", "'cluster_size == '", "+", "str", "(", "cluster_size_condition", ")", ")", "# select the events with only 1 hit cluster", "if", "n_cluster_condition", "is", "not", "None", ":", "selected_events_2", "=", "analysis_utils", ".", "get_events_with_n_cluster", "(", "event_number", "=", "data", "[", "'event_number'", "]", ",", "condition", "=", "'n_cluster == '", "+", "str", "(", "n_cluster_condition", ")", ")", "# select the events with only 1 cluster", "selected_events", "=", "selected_events", "[", "analysis_utils", ".", "in1d_events", "(", "selected_events", ",", "selected_events_2", ")", "]", "# select events with the first two conditions above", "elif", "n_cluster_condition", "is", "not", "None", ":", "selected_events", "=", "analysis_utils", ".", "get_events_with_n_cluster", "(", "event_number", "=", "data", "[", "'event_number'", "]", ",", "condition", "=", "'n_cluster == '", "+", "str", "(", "n_cluster_condition", ")", ")", "else", ":", "raise", "RuntimeError", "(", "'Cannot understand cluster selection criterion'", ")", "last_word_number", "=", "analysis_utils", ".", "write_hits_in_events", "(", "hit_table_in", "=", "in_hit_file_h5", ".", "root", ".", "Hits", ",", "hit_table_out", "=", "hit_table_out", ",", "events", "=", "selected_events", ",", "start_hit_word", "=", "last_word_number", ",", "condition", "=", "condition", ",", "chunk_size", "=", "chunk_size", ")", "# write the hits of the selected events into a new table", "progress_bar", ".", "update", "(", "index", ")", "progress_bar", ".", "finish", "(", ")", "in_hit_file_h5", ".", "root", ".", "meta_data", ".", "copy", "(", "out_hit_file_h5", ".", "root", ")" ]
Takes a hit table and stores only selected hits into a new table. The selection of hits is done with a numexpr string; a hit is taken only if this expression evaluates to True. One can also select hits from cluster conditions. This selection is done on an event basis, meaning events are selected where the cluster condition is true and then the hits of these events are taken. Parameters ---------- input_file_hits: str the input file name with hits output_file_hits: str the output file name for the hits condition: str Numexpr string to select hits (e.g.: '(relative_BCID == 6) & (column == row)'). All hit infos can be used (column, row, ...) cluster_size_condition: int Hits of events with the given cluster size are selected. n_cluster_condition: int Hits of events with the given number of clusters are selected.
[ "Takes", "a", "hit", "table", "and", "stores", "only", "selected", "hits", "into", "a", "new", "table", ".", "The", "selection", "of", "hits", "is", "done", "with", "a", "numexp", "string", ".", "Only", "if", "this", "expression", "evaluates", "to", "true", "the", "hit", "is", "taken", ".", "One", "can", "also", "select", "hits", "from", "cluster", "conditions", ".", "This", "selection", "is", "done", "on", "an", "event", "basis", "meaning", "events", "are", "selected", "where", "the", "cluster", "condition", "is", "true", "and", "then", "hits", "of", "these", "events", "are", "taken", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L288-L338
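For readers who want to apply select_hits directly, a minimal usage sketch follows. It assumes the function is importable from pybar.analysis.analysis (matching the file path above); the HDF5 file names are hypothetical and the cut values are only examples.

from pybar.analysis.analysis import select_hits  # assumed import path, derived from pybar/analysis/analysis.py

# Keep hits of events that have exactly one cluster of size one and whose hits
# additionally fulfil the numexpr condition; all HitInfoTable column names can be used.
select_hits(input_file_hits='scan_interpreted.h5',     # hypothetical input file with Hits and Cluster tables
            output_file_hits='scan_selected_hits.h5',  # hypothetical output file
            condition='(relative_BCID == 6)',          # example numexpr cut from the docstring
            cluster_size_condition=1,
            n_cluster_condition=1)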
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
analyze_cluster_size_per_scan_parameter
def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size, parameter='GDAC', max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None): ''' This method takes multiple hit files and determines the cluster size for different scan parameter values of Parameters ---------- input_files_hits: string output_file_cluster_size: string The data file with the results parameter: string The name of the parameter to separate the data into (e.g.: PlsrDAC) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer overwrite_output_files: bool Set to true to overwrite the output file if it already exists output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen, if False nothing is printed ''' logging.info('Analyze the cluster sizes for different ' + parameter + ' settings for ' + input_file_hits) if os.path.isfile(output_file_cluster_size) and not overwrite_output_files: # skip analysis if already done logging.info('Analyzed cluster size file ' + output_file_cluster_size + ' already exists. Skip cluster size analysis.') else: with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5: # file to write the data into filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data parameter_goup = out_file_h5.create_group(out_file_h5.root, parameter, title=parameter) # note to store the data cluster_size_total = None # final array for the cluster size per GDAC with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: # open the actual hit file meta_data_array = in_hit_file_h5.root.meta_data[:] scan_parameter = analysis_utils.get_scan_parameter(meta_data_array) # get the scan parameters if scan_parameter: # if a GDAC scan parameter was used analyze the cluster size per GDAC setting scan_parameter_values = scan_parameter[parameter] # scan parameter settings used if len(scan_parameter_values) == 1: # only analyze per scan step if there are more than one scan step logging.warning('The file ' + str(input_file_hits) + ' has no different ' + str(parameter) + ' parameter values. 
Omit analysis.') else: logging.info('Analyze ' + input_file_hits + ' per scan parameter ' + parameter + ' for ' + str(len(scan_parameter_values)) + ' values from ' + str(np.amin(scan_parameter_values)) + ' to ' + str(np.amax(scan_parameter_values))) event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)['event_number'] # get the event numbers in meta_data where the scan parameter changes parameter_ranges = np.column_stack((scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers))) hit_table = in_hit_file_h5.root.Hits analysis_utils.index_event_number(hit_table) total_hits, total_hits_2, index = 0, 0, 0 chunk_size = max_chunk_size # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_cluster_size_hist = True analyze_data.create_cluster_tot_hist = True analyze_data.histogram.set_no_scan_parameter() # one has to tell histogram the # of scan parameters for correct occupancy hist allocation progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80) progress_bar.start() for parameter_index, parameter_range in enumerate(parameter_ranges): # loop over the selected events analyze_data.reset() # resets the data of the last analysis logging.debug('Analyze GDAC = ' + str(parameter_range[0]) + ' ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%') start_event_number = parameter_range[1] stop_event_number = parameter_range[2] logging.debug('Data from events = [' + str(start_event_number) + ',' + str(stop_event_number) + '[') actual_parameter_group = out_file_h5.create_group(parameter_goup, name=parameter + '_' + str(parameter_range[0]), title=parameter + '_' + str(parameter_range[0])) # loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size): total_hits += hits.shape[0] analyze_data.analyze_hits(hits) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] progress_bar.update(index) chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size # to increase the readout speed, estimated the number of hits for one read instruction if chunk_size < 50: # limit the lower chunk size, there can always be a crazy event with more than 20 hits chunk_size = 50 # get occupancy hist occupancy = analyze_data.histogram.get_occupancy() # just check here if histogram is consistent # store and plot cluster size hist cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist() cluster_size_hist_table = out_file_h5.create_carray(actual_parameter_group, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table) cluster_size_hist_table[:] = cluster_size_hist if output_pdf is not False: plotting.plot_cluster_size(hist=cluster_size_hist, title='Cluster size (' + str(np.sum(cluster_size_hist)) + ' entries) for ' + parameter + ' = ' + str(scan_parameter_values[parameter_index]), filename=output_pdf) if cluster_size_total is None: # true if no 
data was appended to the array yet cluster_size_total = cluster_size_hist else: cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist]) total_hits_2 += np.sum(occupancy) progress_bar.finish() if total_hits != total_hits_2: logging.warning('Analysis shows inconsistent number of hits. Check needed!') logging.info('Analyzed %d hits!', total_hits) cluster_size_total_out = out_file_h5.create_carray(out_file_h5.root, name='AllHistClusterSize', title='All Cluster Size Histograms', atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table) cluster_size_total_out[:] = cluster_size_total
python
def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size, parameter='GDAC', max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None): ''' This method takes multiple hit files and determines the cluster size for different scan parameter values of Parameters ---------- input_files_hits: string output_file_cluster_size: string The data file with the results parameter: string The name of the parameter to separate the data into (e.g.: PlsrDAC) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer overwrite_output_files: bool Set to true to overwrite the output file if it already exists output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen, if False nothing is printed ''' logging.info('Analyze the cluster sizes for different ' + parameter + ' settings for ' + input_file_hits) if os.path.isfile(output_file_cluster_size) and not overwrite_output_files: # skip analysis if already done logging.info('Analyzed cluster size file ' + output_file_cluster_size + ' already exists. Skip cluster size analysis.') else: with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5: # file to write the data into filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data parameter_goup = out_file_h5.create_group(out_file_h5.root, parameter, title=parameter) # note to store the data cluster_size_total = None # final array for the cluster size per GDAC with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: # open the actual hit file meta_data_array = in_hit_file_h5.root.meta_data[:] scan_parameter = analysis_utils.get_scan_parameter(meta_data_array) # get the scan parameters if scan_parameter: # if a GDAC scan parameter was used analyze the cluster size per GDAC setting scan_parameter_values = scan_parameter[parameter] # scan parameter settings used if len(scan_parameter_values) == 1: # only analyze per scan step if there are more than one scan step logging.warning('The file ' + str(input_file_hits) + ' has no different ' + str(parameter) + ' parameter values. 
Omit analysis.') else: logging.info('Analyze ' + input_file_hits + ' per scan parameter ' + parameter + ' for ' + str(len(scan_parameter_values)) + ' values from ' + str(np.amin(scan_parameter_values)) + ' to ' + str(np.amax(scan_parameter_values))) event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)['event_number'] # get the event numbers in meta_data where the scan parameter changes parameter_ranges = np.column_stack((scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers))) hit_table = in_hit_file_h5.root.Hits analysis_utils.index_event_number(hit_table) total_hits, total_hits_2, index = 0, 0, 0 chunk_size = max_chunk_size # initialize the analysis and set settings analyze_data = AnalyzeRawData() analyze_data.create_cluster_size_hist = True analyze_data.create_cluster_tot_hist = True analyze_data.histogram.set_no_scan_parameter() # one has to tell histogram the # of scan parameters for correct occupancy hist allocation progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80) progress_bar.start() for parameter_index, parameter_range in enumerate(parameter_ranges): # loop over the selected events analyze_data.reset() # resets the data of the last analysis logging.debug('Analyze GDAC = ' + str(parameter_range[0]) + ' ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%') start_event_number = parameter_range[1] stop_event_number = parameter_range[2] logging.debug('Data from events = [' + str(start_event_number) + ',' + str(stop_event_number) + '[') actual_parameter_group = out_file_h5.create_group(parameter_goup, name=parameter + '_' + str(parameter_range[0]), title=parameter + '_' + str(parameter_range[0])) # loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size): total_hits += hits.shape[0] analyze_data.analyze_hits(hits) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] progress_bar.update(index) chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size # to increase the readout speed, estimated the number of hits for one read instruction if chunk_size < 50: # limit the lower chunk size, there can always be a crazy event with more than 20 hits chunk_size = 50 # get occupancy hist occupancy = analyze_data.histogram.get_occupancy() # just check here if histogram is consistent # store and plot cluster size hist cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist() cluster_size_hist_table = out_file_h5.create_carray(actual_parameter_group, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table) cluster_size_hist_table[:] = cluster_size_hist if output_pdf is not False: plotting.plot_cluster_size(hist=cluster_size_hist, title='Cluster size (' + str(np.sum(cluster_size_hist)) + ' entries) for ' + parameter + ' = ' + str(scan_parameter_values[parameter_index]), filename=output_pdf) if cluster_size_total is None: # true if no 
data was appended to the array yet cluster_size_total = cluster_size_hist else: cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist]) total_hits_2 += np.sum(occupancy) progress_bar.finish() if total_hits != total_hits_2: logging.warning('Analysis shows inconsistent number of hits. Check needed!') logging.info('Analyzed %d hits!', total_hits) cluster_size_total_out = out_file_h5.create_carray(out_file_h5.root, name='AllHistClusterSize', title='All Cluster Size Histograms', atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table) cluster_size_total_out[:] = cluster_size_total
[ "def", "analyze_cluster_size_per_scan_parameter", "(", "input_file_hits", ",", "output_file_cluster_size", ",", "parameter", "=", "'GDAC'", ",", "max_chunk_size", "=", "10000000", ",", "overwrite_output_files", "=", "False", ",", "output_pdf", "=", "None", ")", ":", "logging", ".", "info", "(", "'Analyze the cluster sizes for different '", "+", "parameter", "+", "' settings for '", "+", "input_file_hits", ")", "if", "os", ".", "path", ".", "isfile", "(", "output_file_cluster_size", ")", "and", "not", "overwrite_output_files", ":", "# skip analysis if already done", "logging", ".", "info", "(", "'Analyzed cluster size file '", "+", "output_file_cluster_size", "+", "' already exists. Skip cluster size analysis.'", ")", "else", ":", "with", "tb", ".", "open_file", "(", "output_file_cluster_size", ",", "mode", "=", "\"w\"", ")", "as", "out_file_h5", ":", "# file to write the data into", "filter_table", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", "# compression of the written data", "parameter_goup", "=", "out_file_h5", ".", "create_group", "(", "out_file_h5", ".", "root", ",", "parameter", ",", "title", "=", "parameter", ")", "# note to store the data", "cluster_size_total", "=", "None", "# final array for the cluster size per GDAC", "with", "tb", ".", "open_file", "(", "input_file_hits", ",", "mode", "=", "\"r+\"", ")", "as", "in_hit_file_h5", ":", "# open the actual hit file", "meta_data_array", "=", "in_hit_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "scan_parameter", "=", "analysis_utils", ".", "get_scan_parameter", "(", "meta_data_array", ")", "# get the scan parameters", "if", "scan_parameter", ":", "# if a GDAC scan parameter was used analyze the cluster size per GDAC setting", "scan_parameter_values", "=", "scan_parameter", "[", "parameter", "]", "# scan parameter settings used", "if", "len", "(", "scan_parameter_values", ")", "==", "1", ":", "# only analyze per scan step if there are more than one scan step", "logging", ".", "warning", "(", "'The file '", "+", "str", "(", "input_file_hits", ")", "+", "' has no different '", "+", "str", "(", "parameter", ")", "+", "' parameter values. 
Omit analysis.'", ")", "else", ":", "logging", ".", "info", "(", "'Analyze '", "+", "input_file_hits", "+", "' per scan parameter '", "+", "parameter", "+", "' for '", "+", "str", "(", "len", "(", "scan_parameter_values", ")", ")", "+", "' values from '", "+", "str", "(", "np", ".", "amin", "(", "scan_parameter_values", ")", ")", "+", "' to '", "+", "str", "(", "np", ".", "amax", "(", "scan_parameter_values", ")", ")", ")", "event_numbers", "=", "analysis_utils", ".", "get_meta_data_at_scan_parameter", "(", "meta_data_array", ",", "parameter", ")", "[", "'event_number'", "]", "# get the event numbers in meta_data where the scan parameter changes", "parameter_ranges", "=", "np", ".", "column_stack", "(", "(", "scan_parameter_values", ",", "analysis_utils", ".", "get_ranges_from_array", "(", "event_numbers", ")", ")", ")", "hit_table", "=", "in_hit_file_h5", ".", "root", ".", "Hits", "analysis_utils", ".", "index_event_number", "(", "hit_table", ")", "total_hits", ",", "total_hits_2", ",", "index", "=", "0", ",", "0", ",", "0", "chunk_size", "=", "max_chunk_size", "# initialize the analysis and set settings", "analyze_data", "=", "AnalyzeRawData", "(", ")", "analyze_data", ".", "create_cluster_size_hist", "=", "True", "analyze_data", ".", "create_cluster_tot_hist", "=", "True", "analyze_data", ".", "histogram", ".", "set_no_scan_parameter", "(", ")", "# one has to tell histogram the # of scan parameters for correct occupancy hist allocation", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "hit_table", ".", "shape", "[", "0", "]", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "for", "parameter_index", ",", "parameter_range", "in", "enumerate", "(", "parameter_ranges", ")", ":", "# loop over the selected events", "analyze_data", ".", "reset", "(", ")", "# resets the data of the last analysis", "logging", ".", "debug", "(", "'Analyze GDAC = '", "+", "str", "(", "parameter_range", "[", "0", "]", ")", "+", "' '", "+", "str", "(", "int", "(", "float", "(", "float", "(", "parameter_index", ")", "/", "float", "(", "len", "(", "parameter_ranges", ")", ")", "*", "100.0", ")", ")", ")", "+", "'%'", ")", "start_event_number", "=", "parameter_range", "[", "1", "]", "stop_event_number", "=", "parameter_range", "[", "2", "]", "logging", ".", "debug", "(", "'Data from events = ['", "+", "str", "(", "start_event_number", ")", "+", "','", "+", "str", "(", "stop_event_number", ")", "+", "'['", ")", "actual_parameter_group", "=", "out_file_h5", ".", "create_group", "(", "parameter_goup", ",", "name", "=", "parameter", "+", "'_'", "+", "str", "(", "parameter_range", "[", "0", "]", ")", ",", "title", "=", "parameter", "+", "'_'", "+", "str", "(", "parameter_range", "[", "0", "]", ")", ")", "# loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given", "readout_hit_len", "=", "0", "# variable to calculate a optimal chunk size value from the number of hits for speed up", "for", "hits", ",", "index", "in", "analysis_utils", ".", "data_aligned_at_events", "(", "hit_table", ",", "start_event_number", "=", "start_event_number", ",", "stop_event_number", "=", "stop_event_number", ",", "start_index", "=", "index", ",", "chunk_size", "=", 
"chunk_size", ")", ":", "total_hits", "+=", "hits", ".", "shape", "[", "0", "]", "analyze_data", ".", "analyze_hits", "(", "hits", ")", "# analyze the selected hits in chunks", "readout_hit_len", "+=", "hits", ".", "shape", "[", "0", "]", "progress_bar", ".", "update", "(", "index", ")", "chunk_size", "=", "int", "(", "1.05", "*", "readout_hit_len", ")", "if", "int", "(", "1.05", "*", "readout_hit_len", ")", "<", "max_chunk_size", "else", "max_chunk_size", "# to increase the readout speed, estimated the number of hits for one read instruction", "if", "chunk_size", "<", "50", ":", "# limit the lower chunk size, there can always be a crazy event with more than 20 hits", "chunk_size", "=", "50", "# get occupancy hist", "occupancy", "=", "analyze_data", ".", "histogram", ".", "get_occupancy", "(", ")", "# just check here if histogram is consistent", "# store and plot cluster size hist", "cluster_size_hist", "=", "analyze_data", ".", "clusterizer", ".", "get_cluster_size_hist", "(", ")", "cluster_size_hist_table", "=", "out_file_h5", ".", "create_carray", "(", "actual_parameter_group", ",", "name", "=", "'HistClusterSize'", ",", "title", "=", "'Cluster Size Histogram'", ",", "atom", "=", "tb", ".", "Atom", ".", "from_dtype", "(", "cluster_size_hist", ".", "dtype", ")", ",", "shape", "=", "cluster_size_hist", ".", "shape", ",", "filters", "=", "filter_table", ")", "cluster_size_hist_table", "[", ":", "]", "=", "cluster_size_hist", "if", "output_pdf", "is", "not", "False", ":", "plotting", ".", "plot_cluster_size", "(", "hist", "=", "cluster_size_hist", ",", "title", "=", "'Cluster size ('", "+", "str", "(", "np", ".", "sum", "(", "cluster_size_hist", ")", ")", "+", "' entries) for '", "+", "parameter", "+", "' = '", "+", "str", "(", "scan_parameter_values", "[", "parameter_index", "]", ")", ",", "filename", "=", "output_pdf", ")", "if", "cluster_size_total", "is", "None", ":", "# true if no data was appended to the array yet", "cluster_size_total", "=", "cluster_size_hist", "else", ":", "cluster_size_total", "=", "np", ".", "vstack", "(", "[", "cluster_size_total", ",", "cluster_size_hist", "]", ")", "total_hits_2", "+=", "np", ".", "sum", "(", "occupancy", ")", "progress_bar", ".", "finish", "(", ")", "if", "total_hits", "!=", "total_hits_2", ":", "logging", ".", "warning", "(", "'Analysis shows inconsistent number of hits. Check needed!'", ")", "logging", ".", "info", "(", "'Analyzed %d hits!'", ",", "total_hits", ")", "cluster_size_total_out", "=", "out_file_h5", ".", "create_carray", "(", "out_file_h5", ".", "root", ",", "name", "=", "'AllHistClusterSize'", ",", "title", "=", "'All Cluster Size Histograms'", ",", "atom", "=", "tb", ".", "Atom", ".", "from_dtype", "(", "cluster_size_total", ".", "dtype", ")", ",", "shape", "=", "cluster_size_total", ".", "shape", ",", "filters", "=", "filter_table", ")", "cluster_size_total_out", "[", ":", "]", "=", "cluster_size_total" ]
This method takes multiple hit files and determines the cluster size for different scan parameter values of Parameters ---------- input_files_hits: string output_file_cluster_size: string The data file with the results parameter: string The name of the parameter to separate the data into (e.g.: PlsrDAC) max_chunk_size: int the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer overwrite_output_files: bool Set to true to overwrite the output file if it already exists output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen, if False nothing is printed
[ "This", "method", "takes", "multiple", "hit", "files", "and", "determines", "the", "cluster", "size", "for", "different", "scan", "parameter", "values", "of" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L341-L425
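A usage sketch for the function above; the import path, the file names and the use of a GDAC scan are assumptions for illustration. Per its docstring, output_pdf expects a matplotlib PdfPages object (or False to suppress plotting).

from matplotlib.backends.backend_pdf import PdfPages
from pybar.analysis.analysis import analyze_cluster_size_per_scan_parameter  # assumed import path

with PdfPages('cluster_size_per_gdac.pdf') as output_pdf:  # hypothetical plot file
    analyze_cluster_size_per_scan_parameter(input_file_hits='gdac_scan_interpreted.h5',            # hypothetical hit file taken with a GDAC scan
                                             output_file_cluster_size='cluster_size_per_gdac.h5',  # hypothetical result file
                                             parameter='GDAC',
                                             overwrite_output_files=True,
                                             output_pdf=output_pdf)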
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
histogram_cluster_table
def histogram_cluster_table(analyzed_data_file, output_file, chunk_size=10000000): '''Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array. The 3rd dimension of the occupancy array is the number of different scan parameters used Parameters ---------- analyzed_data_file : string HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step. Returns ------- occupancy_array: numpy.array with dimensions (col, row, #scan_parameter) ''' with tb.open_file(analyzed_data_file, mode="r") as in_file_h5: with tb.open_file(output_file, mode="w") as out_file_h5: histogram = PyDataHistograming() histogram.create_occupancy_hist(True) scan_parameters = None event_number_indices = None scan_parameter_indices = None try: meta_data = in_file_h5.root.meta_data[:] scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data) if scan_parameters is not None: scan_parameter_indices = np.array(range(0, len(scan_parameters)), dtype='u4') event_number_indices = np.ascontiguousarray(scan_parameters['event_number']).astype(np.uint64) histogram.add_meta_event_index(event_number_indices, array_length=len(scan_parameters['event_number'])) histogram.add_scan_parameter(scan_parameter_indices) logging.info("Add %d different scan parameter(s) for analysis", len(scan_parameters)) else: logging.info("No scan parameter data provided") histogram.set_no_scan_parameter() except tb.exceptions.NoSuchNodeError: logging.info("No meta data provided, use no scan parameter") histogram.set_no_scan_parameter() logging.info('Histogram cluster seeds...') progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80) progress_bar.start() total_cluster = 0 # to check analysis for cluster, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, chunk_size=chunk_size): total_cluster += len(cluster) histogram.add_cluster_seed_hits(cluster, len(cluster)) progress_bar.update(index) progress_bar.finish() filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data occupancy_array = histogram.get_occupancy().T occupancy_array_table = out_file_h5.create_carray(out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(occupancy_array.dtype), shape=occupancy_array.shape, filters=filter_table) occupancy_array_table[:] = occupancy_array if total_cluster != np.sum(occupancy_array): logging.warning('Analysis shows inconsistent number of cluster used. Check needed!') in_file_h5.root.meta_data.copy(out_file_h5.root)
python
def histogram_cluster_table(analyzed_data_file, output_file, chunk_size=10000000): '''Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array. The 3rd dimension of the occupancy array is the number of different scan parameters used Parameters ---------- analyzed_data_file : string HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step. Returns ------- occupancy_array: numpy.array with dimensions (col, row, #scan_parameter) ''' with tb.open_file(analyzed_data_file, mode="r") as in_file_h5: with tb.open_file(output_file, mode="w") as out_file_h5: histogram = PyDataHistograming() histogram.create_occupancy_hist(True) scan_parameters = None event_number_indices = None scan_parameter_indices = None try: meta_data = in_file_h5.root.meta_data[:] scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data) if scan_parameters is not None: scan_parameter_indices = np.array(range(0, len(scan_parameters)), dtype='u4') event_number_indices = np.ascontiguousarray(scan_parameters['event_number']).astype(np.uint64) histogram.add_meta_event_index(event_number_indices, array_length=len(scan_parameters['event_number'])) histogram.add_scan_parameter(scan_parameter_indices) logging.info("Add %d different scan parameter(s) for analysis", len(scan_parameters)) else: logging.info("No scan parameter data provided") histogram.set_no_scan_parameter() except tb.exceptions.NoSuchNodeError: logging.info("No meta data provided, use no scan parameter") histogram.set_no_scan_parameter() logging.info('Histogram cluster seeds...') progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80) progress_bar.start() total_cluster = 0 # to check analysis for cluster, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, chunk_size=chunk_size): total_cluster += len(cluster) histogram.add_cluster_seed_hits(cluster, len(cluster)) progress_bar.update(index) progress_bar.finish() filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data occupancy_array = histogram.get_occupancy().T occupancy_array_table = out_file_h5.create_carray(out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(occupancy_array.dtype), shape=occupancy_array.shape, filters=filter_table) occupancy_array_table[:] = occupancy_array if total_cluster != np.sum(occupancy_array): logging.warning('Analysis shows inconsistent number of cluster used. Check needed!') in_file_h5.root.meta_data.copy(out_file_h5.root)
[ "def", "histogram_cluster_table", "(", "analyzed_data_file", ",", "output_file", ",", "chunk_size", "=", "10000000", ")", ":", "with", "tb", ".", "open_file", "(", "analyzed_data_file", ",", "mode", "=", "\"r\"", ")", "as", "in_file_h5", ":", "with", "tb", ".", "open_file", "(", "output_file", ",", "mode", "=", "\"w\"", ")", "as", "out_file_h5", ":", "histogram", "=", "PyDataHistograming", "(", ")", "histogram", ".", "create_occupancy_hist", "(", "True", ")", "scan_parameters", "=", "None", "event_number_indices", "=", "None", "scan_parameter_indices", "=", "None", "try", ":", "meta_data", "=", "in_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "scan_parameters", "=", "analysis_utils", ".", "get_unique_scan_parameter_combinations", "(", "meta_data", ")", "if", "scan_parameters", "is", "not", "None", ":", "scan_parameter_indices", "=", "np", ".", "array", "(", "range", "(", "0", ",", "len", "(", "scan_parameters", ")", ")", ",", "dtype", "=", "'u4'", ")", "event_number_indices", "=", "np", ".", "ascontiguousarray", "(", "scan_parameters", "[", "'event_number'", "]", ")", ".", "astype", "(", "np", ".", "uint64", ")", "histogram", ".", "add_meta_event_index", "(", "event_number_indices", ",", "array_length", "=", "len", "(", "scan_parameters", "[", "'event_number'", "]", ")", ")", "histogram", ".", "add_scan_parameter", "(", "scan_parameter_indices", ")", "logging", ".", "info", "(", "\"Add %d different scan parameter(s) for analysis\"", ",", "len", "(", "scan_parameters", ")", ")", "else", ":", "logging", ".", "info", "(", "\"No scan parameter data provided\"", ")", "histogram", ".", "set_no_scan_parameter", "(", ")", "except", "tb", ".", "exceptions", ".", "NoSuchNodeError", ":", "logging", ".", "info", "(", "\"No meta data provided, use no scan parameter\"", ")", "histogram", ".", "set_no_scan_parameter", "(", ")", "logging", ".", "info", "(", "'Histogram cluster seeds...'", ")", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "in_file_h5", ".", "root", ".", "Cluster", ".", "shape", "[", "0", "]", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "total_cluster", "=", "0", "# to check analysis", "for", "cluster", ",", "index", "in", "analysis_utils", ".", "data_aligned_at_events", "(", "in_file_h5", ".", "root", ".", "Cluster", ",", "chunk_size", "=", "chunk_size", ")", ":", "total_cluster", "+=", "len", "(", "cluster", ")", "histogram", ".", "add_cluster_seed_hits", "(", "cluster", ",", "len", "(", "cluster", ")", ")", "progress_bar", ".", "update", "(", "index", ")", "progress_bar", ".", "finish", "(", ")", "filter_table", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", "# compression of the written data", "occupancy_array", "=", "histogram", ".", "get_occupancy", "(", ")", ".", "T", "occupancy_array_table", "=", "out_file_h5", ".", "create_carray", "(", "out_file_h5", ".", "root", ",", "name", "=", "'HistOcc'", ",", "title", "=", "'Occupancy Histogram'", ",", "atom", "=", "tb", ".", "Atom", ".", "from_dtype", "(", "occupancy_array", ".", "dtype", ")", ",", "shape", "=", "occupancy_array", ".", "shape", ",", "filters", "=", "filter_table", ")", "occupancy_array_table", "[", ":", 
"]", "=", "occupancy_array", "if", "total_cluster", "!=", "np", ".", "sum", "(", "occupancy_array", ")", ":", "logging", ".", "warning", "(", "'Analysis shows inconsistent number of cluster used. Check needed!'", ")", "in_file_h5", ".", "root", ".", "meta_data", ".", "copy", "(", "out_file_h5", ".", "root", ")" ]
Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array. The 3rd dimension of the occupancy array is the number of different scan parameters used Parameters ---------- analyzed_data_file : string HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step. Returns ------- occupancy_array: numpy.array with dimensions (col, row, #scan_parameter)
[ "Reads", "in", "the", "cluster", "info", "table", "in", "chunks", "and", "histograms", "the", "seed", "pixels", "into", "one", "occupancy", "array", ".", "The", "3rd", "dimension", "of", "the", "occupancy", "array", "is", "the", "number", "of", "different", "scan", "parameters", "used" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L428-L482
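A corresponding usage sketch; the import path and file names are assumptions. The per-scan-parameter seed occupancy is written to the HistOcc node of the output file.

from pybar.analysis.analysis import histogram_cluster_table  # assumed import path

histogram_cluster_table(analyzed_data_file='source_scan_interpreted.h5',  # hypothetical file containing a Cluster table and meta_data
                        output_file='seed_pixel_occupancy.h5',            # hypothetical output file, will hold the HistOcc array
                        chunk_size=10000000)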
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
analyze_hits_per_scan_parameter
def analyze_hits_per_scan_parameter(analyze_data, scan_parameters=None, chunk_size=50000): '''Takes the hit table and analyzes the hits per scan parameter Parameters ---------- analyze_data : analysis.analyze_raw_data.AnalyzeRawData object with an opened hit file (AnalyzeRawData.out_file_h5) or a file name with the hit data given (AnalyzeRawData._analyzed_data_file) scan_parameters : list of strings: The names of the scan parameters to use chunk_size : int: The chunk size of one hit table read. The bigger the faster. Too big causes memory errors. Returns ------- yields the analysis.analyze_raw_data.AnalyzeRawData for each scan parameter ''' if analyze_data.out_file_h5 is None or analyze_data.out_file_h5.isopen == 0: in_hit_file_h5 = tb.open_file(analyze_data._analyzed_data_file, 'r+') close_file = True else: in_hit_file_h5 = analyze_data.out_file_h5 close_file = False meta_data = in_hit_file_h5.root.meta_data[:] # get the meta data table try: hit_table = in_hit_file_h5.root.Hits # get the hit table except tb.NoSuchNodeError: logging.error('analyze_hits_per_scan_parameter needs a hit table, but no hit table found.') return meta_data_table_at_scan_parameter = analysis_utils.get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameters) parameter_values = analysis_utils.get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameters) event_number_ranges = analysis_utils.get_ranges_from_array(meta_data_table_at_scan_parameter['event_number']) # get the event number ranges for the different scan parameter settings analysis_utils.index_event_number(hit_table) # create a event_numer index to select the hits by their event number fast, no needed but important for speed up # variables for read speed up index = 0 # index where to start the read out of the hit table, 0 at the beginning, increased during looping best_chunk_size = chunk_size # number of hits to copy to RAM during looping, the optimal chunk size is determined during looping # loop over the selected events for parameter_index, (start_event_number, stop_event_number) in enumerate(event_number_ranges): logging.info('Analyze hits for ' + str(scan_parameters) + ' = ' + str(parameter_values[parameter_index])) analyze_data.reset() # resets the front end data of the last analysis step but not the options readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=best_chunk_size): analyze_data.analyze_hits(hits, scan_parameter=False) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size and int(1.05 * readout_hit_len) > 1e3 else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction file_name = " ".join(re.findall("[a-zA-Z0-9]+", str(scan_parameters))) + '_' + " ".join(re.findall("[a-zA-Z0-9]+", str(parameter_values[parameter_index]))) analyze_data._create_additional_hit_data(safe_to_file=False) analyze_data._create_additional_cluster_data(safe_to_file=False) yield analyze_data, file_name if close_file: in_hit_file_h5.close()
python
def analyze_hits_per_scan_parameter(analyze_data, scan_parameters=None, chunk_size=50000): '''Takes the hit table and analyzes the hits per scan parameter Parameters ---------- analyze_data : analysis.analyze_raw_data.AnalyzeRawData object with an opened hit file (AnalyzeRawData.out_file_h5) or a file name with the hit data given (AnalyzeRawData._analyzed_data_file) scan_parameters : list of strings: The names of the scan parameters to use chunk_size : int: The chunk size of one hit table read. The bigger the faster. Too big causes memory errors. Returns ------- yields the analysis.analyze_raw_data.AnalyzeRawData for each scan parameter ''' if analyze_data.out_file_h5 is None or analyze_data.out_file_h5.isopen == 0: in_hit_file_h5 = tb.open_file(analyze_data._analyzed_data_file, 'r+') close_file = True else: in_hit_file_h5 = analyze_data.out_file_h5 close_file = False meta_data = in_hit_file_h5.root.meta_data[:] # get the meta data table try: hit_table = in_hit_file_h5.root.Hits # get the hit table except tb.NoSuchNodeError: logging.error('analyze_hits_per_scan_parameter needs a hit table, but no hit table found.') return meta_data_table_at_scan_parameter = analysis_utils.get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameters) parameter_values = analysis_utils.get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameters) event_number_ranges = analysis_utils.get_ranges_from_array(meta_data_table_at_scan_parameter['event_number']) # get the event number ranges for the different scan parameter settings analysis_utils.index_event_number(hit_table) # create a event_numer index to select the hits by their event number fast, no needed but important for speed up # variables for read speed up index = 0 # index where to start the read out of the hit table, 0 at the beginning, increased during looping best_chunk_size = chunk_size # number of hits to copy to RAM during looping, the optimal chunk size is determined during looping # loop over the selected events for parameter_index, (start_event_number, stop_event_number) in enumerate(event_number_ranges): logging.info('Analyze hits for ' + str(scan_parameters) + ' = ' + str(parameter_values[parameter_index])) analyze_data.reset() # resets the front end data of the last analysis step but not the options readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=best_chunk_size): analyze_data.analyze_hits(hits, scan_parameter=False) # analyze the selected hits in chunks readout_hit_len += hits.shape[0] best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size and int(1.05 * readout_hit_len) > 1e3 else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction file_name = " ".join(re.findall("[a-zA-Z0-9]+", str(scan_parameters))) + '_' + " ".join(re.findall("[a-zA-Z0-9]+", str(parameter_values[parameter_index]))) analyze_data._create_additional_hit_data(safe_to_file=False) analyze_data._create_additional_cluster_data(safe_to_file=False) yield analyze_data, file_name if close_file: in_hit_file_h5.close()
[ "def", "analyze_hits_per_scan_parameter", "(", "analyze_data", ",", "scan_parameters", "=", "None", ",", "chunk_size", "=", "50000", ")", ":", "if", "analyze_data", ".", "out_file_h5", "is", "None", "or", "analyze_data", ".", "out_file_h5", ".", "isopen", "==", "0", ":", "in_hit_file_h5", "=", "tb", ".", "open_file", "(", "analyze_data", ".", "_analyzed_data_file", ",", "'r+'", ")", "close_file", "=", "True", "else", ":", "in_hit_file_h5", "=", "analyze_data", ".", "out_file_h5", "close_file", "=", "False", "meta_data", "=", "in_hit_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "# get the meta data table", "try", ":", "hit_table", "=", "in_hit_file_h5", ".", "root", ".", "Hits", "# get the hit table", "except", "tb", ".", "NoSuchNodeError", ":", "logging", ".", "error", "(", "'analyze_hits_per_scan_parameter needs a hit table, but no hit table found.'", ")", "return", "meta_data_table_at_scan_parameter", "=", "analysis_utils", ".", "get_unique_scan_parameter_combinations", "(", "meta_data", ",", "scan_parameters", "=", "scan_parameters", ")", "parameter_values", "=", "analysis_utils", ".", "get_scan_parameters_table_from_meta_data", "(", "meta_data_table_at_scan_parameter", ",", "scan_parameters", ")", "event_number_ranges", "=", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_table_at_scan_parameter", "[", "'event_number'", "]", ")", "# get the event number ranges for the different scan parameter settings", "analysis_utils", ".", "index_event_number", "(", "hit_table", ")", "# create a event_numer index to select the hits by their event number fast, no needed but important for speed up", "# variables for read speed up", "index", "=", "0", "# index where to start the read out of the hit table, 0 at the beginning, increased during looping", "best_chunk_size", "=", "chunk_size", "# number of hits to copy to RAM during looping, the optimal chunk size is determined during looping", "# loop over the selected events", "for", "parameter_index", ",", "(", "start_event_number", ",", "stop_event_number", ")", "in", "enumerate", "(", "event_number_ranges", ")", ":", "logging", ".", "info", "(", "'Analyze hits for '", "+", "str", "(", "scan_parameters", ")", "+", "' = '", "+", "str", "(", "parameter_values", "[", "parameter_index", "]", ")", ")", "analyze_data", ".", "reset", "(", ")", "# resets the front end data of the last analysis step but not the options", "readout_hit_len", "=", "0", "# variable to calculate a optimal chunk size value from the number of hits for speed up", "# loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given", "for", "hits", ",", "index", "in", "analysis_utils", ".", "data_aligned_at_events", "(", "hit_table", ",", "start_event_number", "=", "start_event_number", ",", "stop_event_number", "=", "stop_event_number", ",", "start_index", "=", "index", ",", "chunk_size", "=", "best_chunk_size", ")", ":", "analyze_data", ".", "analyze_hits", "(", "hits", ",", "scan_parameter", "=", "False", ")", "# analyze the selected hits in chunks", "readout_hit_len", "+=", "hits", ".", "shape", "[", "0", "]", "best_chunk_size", "=", "int", "(", "1.5", "*", "readout_hit_len", ")", "if", "int", "(", "1.05", "*", "readout_hit_len", ")", "<", "chunk_size", "and", "int", "(", "1.05", "*", "readout_hit_len", ")", ">", "1e3", "else", "chunk_size", "# to increase the readout speed, estimated the number of hits for one read instruction", "file_name", "=", "\" \"", ".", "join", "(", "re", ".", "findall", "(", 
"\"[a-zA-Z0-9]+\"", ",", "str", "(", "scan_parameters", ")", ")", ")", "+", "'_'", "+", "\" \"", ".", "join", "(", "re", ".", "findall", "(", "\"[a-zA-Z0-9]+\"", ",", "str", "(", "parameter_values", "[", "parameter_index", "]", ")", ")", ")", "analyze_data", ".", "_create_additional_hit_data", "(", "safe_to_file", "=", "False", ")", "analyze_data", ".", "_create_additional_cluster_data", "(", "safe_to_file", "=", "False", ")", "yield", "analyze_data", ",", "file_name", "if", "close_file", ":", "in_hit_file_h5", ".", "close", "(", ")" ]
Takes the hit table and analyzes the hits per scan parameter Parameters ---------- analyze_data : analysis.analyze_raw_data.AnalyzeRawData object with an opened hit file (AnalyzeRawData.out_file_h5) or a file name with the hit data given (AnalyzeRawData._analyzed_data_file) scan_parameters : list of strings: The names of the scan parameters to use chunk_size : int: The chunk size of one hit table read. The bigger the faster. Too big causes memory errors. Returns ------- yields the analysis.analyze_raw_data.AnalyzeRawData for each scan parameter
[ "Takes", "the", "hit", "table", "and", "analyzes", "the", "hits", "per", "scan", "parameter" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L485-L541
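Because the function above is a generator, a caller iterates over it. The sketch below assumes AnalyzeRawData lives in pybar.analysis.analyze_raw_data (the class named in the docstring), that the interpreted hit file can be passed at construction, and that the file and scan parameter names are placeholders.

from pybar.analysis.analyze_raw_data import AnalyzeRawData            # assumed import path
from pybar.analysis.analysis import analyze_hits_per_scan_parameter   # assumed import path

analyze_data = AnalyzeRawData(analyzed_data_file='plsr_dac_scan_interpreted.h5')  # assumed constructor argument; hypothetical hit file
analyze_data.create_cluster_size_hist = True  # analysis option also set in analyze_cluster_size_per_scan_parameter above
for analyzed, file_name_suffix in analyze_hits_per_scan_parameter(analyze_data, scan_parameters=['PlsrDAC']):
    # 'analyzed' is the same AnalyzeRawData object after processing one scan parameter value,
    # 'file_name_suffix' is a file-name-safe string built from the parameter name and its value.
    print('finished analysis step', file_name_suffix)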
SiLab-Bonn/pyBAR
pybar/scans/calibrate_plsr_dac_transient.py
interpret_data_from_tektronix
def interpret_data_from_tektronix(preamble, data):
    ''' Interprets raw data from Tektronix
    returns: lists of x, y values in seconds/volt'''
    # Y mode ("WFMPRE:PT_FMT"):
    # Xn = XZEro + XINcr (n - PT_Off)
    # Yn = YZEro + YMUlt (yn - YOFf)
    voltage = np.array(data, dtype=np.float)
    meta_data = preamble.split(',')[5].split(';')
    time_unit = meta_data[3][1:-1]
    XZEro = float(meta_data[5])
    XINcr = float(meta_data[4])
    PT_Off = float(meta_data[6])
    voltage_unit = meta_data[7][1:-1]
    YZEro = float(meta_data[10])
    YMUlt = float(meta_data[8])
    YOFf = float(meta_data[9])
    time = XZEro + XINcr * (np.arange(0, voltage.size) - PT_Off)
    voltage = YZEro + YMUlt * (voltage - YOFf)
    return time, voltage, time_unit, voltage_unit
python
def interpret_data_from_tektronix(preamble, data):
    ''' Interprets raw data from Tektronix
    returns: lists of x, y values in seconds/volt'''
    # Y mode ("WFMPRE:PT_FMT"):
    # Xn = XZEro + XINcr (n - PT_Off)
    # Yn = YZEro + YMUlt (yn - YOFf)
    voltage = np.array(data, dtype=np.float)
    meta_data = preamble.split(',')[5].split(';')
    time_unit = meta_data[3][1:-1]
    XZEro = float(meta_data[5])
    XINcr = float(meta_data[4])
    PT_Off = float(meta_data[6])
    voltage_unit = meta_data[7][1:-1]
    YZEro = float(meta_data[10])
    YMUlt = float(meta_data[8])
    YOFf = float(meta_data[9])
    time = XZEro + XINcr * (np.arange(0, voltage.size) - PT_Off)
    voltage = YZEro + YMUlt * (voltage - YOFf)
    return time, voltage, time_unit, voltage_unit
[ "def", "interpret_data_from_tektronix", "(", "preamble", ",", "data", ")", ":", "# Y mode (\"WFMPRE:PT_FMT\"):", "# Xn = XZEro + XINcr (n - PT_Off)", "# Yn = YZEro + YMUlt (yn - YOFf)", "voltage", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "np", ".", "float", ")", "meta_data", "=", "preamble", ".", "split", "(", "','", ")", "[", "5", "]", ".", "split", "(", "';'", ")", "time_unit", "=", "meta_data", "[", "3", "]", "[", "1", ":", "-", "1", "]", "XZEro", "=", "float", "(", "meta_data", "[", "5", "]", ")", "XINcr", "=", "float", "(", "meta_data", "[", "4", "]", ")", "PT_Off", "=", "float", "(", "meta_data", "[", "6", "]", ")", "voltage_unit", "=", "meta_data", "[", "7", "]", "[", "1", ":", "-", "1", "]", "YZEro", "=", "float", "(", "meta_data", "[", "10", "]", ")", "YMUlt", "=", "float", "(", "meta_data", "[", "8", "]", ")", "YOFf", "=", "float", "(", "meta_data", "[", "9", "]", ")", "time", "=", "XZEro", "+", "XINcr", "*", "(", "np", ".", "arange", "(", "0", ",", "voltage", ".", "size", ")", "-", "PT_Off", ")", "voltage", "=", "YZEro", "+", "YMUlt", "*", "(", "voltage", "-", "YOFf", ")", "return", "time", ",", "voltage", ",", "time_unit", ",", "voltage_unit" ]
Interprets raw data from Tektronix returns: lists of x, y values in seconds/volt
[ "Interprets", "raw", "data", "from", "Tektronix", "returns", ":", "lists", "of", "x", "y", "values", "in", "seconds", "/", "volt" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/scans/calibrate_plsr_dac_transient.py#L24-L42
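To make the two scaling formulas above concrete, here is a small numeric illustration of Xn = XZEro + XINcr * (n - PT_Off) and Yn = YZEro + YMUlt * (yn - YOFf); all numbers are invented and only demonstrate the arithmetic.

import numpy as np

raw = np.array([100.0, 120.0, 140.0])   # hypothetical raw curve values from the scope
XZEro, XINcr, PT_Off = 0.0, 1e-9, 0.0   # assumed: 1 ns sample spacing, no offsets
YZEro, YMUlt, YOFf = 0.0, 4e-3, 128.0   # assumed: 4 mV per count around a 128-count offset

time = XZEro + XINcr * (np.arange(raw.size) - PT_Off)  # -> [0.0, 1e-09, 2e-09] seconds
voltage = YZEro + YMUlt * (raw - YOFf)                  # -> [-0.112, -0.032, 0.048] volts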
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
read_chip_sn
def read_chip_sn(self): '''Reading Chip S/N Note ---- Bits [MSB-LSB] | [15] | [14-6] | [5-0] Content | reserved | wafer number | chip number ''' commands = [] commands.extend(self.register.get_commands("ConfMode")) self.register_utils.send_commands(commands) with self.readout(fill_buffer=True, callback=None, errback=None): if self.register.fei4b: commands = [] self.register.set_global_register_value('Efuse_Sense', 1) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) commands.extend(self.register.get_commands("GlobalPulse", Width=0)) self.register.set_global_register_value('Efuse_Sense', 0) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) self.register_utils.send_commands(commands) commands = [] self.register.set_global_register_value('Conf_AddrEnable', 1) commands.extend(self.register.get_commands("WrRegister", name=['Conf_AddrEnable'])) chip_sn_address = self.register.get_global_register_attributes("addresses", name="Chip_SN") commands.extend(self.register.get_commands("RdRegister", addresses=chip_sn_address)) self.register_utils.send_commands(commands) data = self.read_data() if data.shape[0] == 0: logging.error('Chip S/N: No data') return read_values = [] for index, word in enumerate(np.nditer(data)): fei4_data_word = FEI4Record(word, self.register.chip_flavor) if fei4_data_word == 'AR': fei4_next_data_word = FEI4Record(data[index + 1], self.register.chip_flavor) if fei4_next_data_word == 'VR': read_value = fei4_next_data_word['value'] read_values.append(read_value) # commands = [] # commands.extend(self.register.get_commands("RunMode")) # self.register_utils.send_commands(commands) if len(read_values) == 0: logging.error('No Chip S/N was found') elif len(read_values) == 1: logging.info('Chip S/N: %d', read_values[0]) else: logging.warning('Ambiguous Chip S/N: %s', read_values)
python
def read_chip_sn(self): '''Reading Chip S/N Note ---- Bits [MSB-LSB] | [15] | [14-6] | [5-0] Content | reserved | wafer number | chip number ''' commands = [] commands.extend(self.register.get_commands("ConfMode")) self.register_utils.send_commands(commands) with self.readout(fill_buffer=True, callback=None, errback=None): if self.register.fei4b: commands = [] self.register.set_global_register_value('Efuse_Sense', 1) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) commands.extend(self.register.get_commands("GlobalPulse", Width=0)) self.register.set_global_register_value('Efuse_Sense', 0) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) self.register_utils.send_commands(commands) commands = [] self.register.set_global_register_value('Conf_AddrEnable', 1) commands.extend(self.register.get_commands("WrRegister", name=['Conf_AddrEnable'])) chip_sn_address = self.register.get_global_register_attributes("addresses", name="Chip_SN") commands.extend(self.register.get_commands("RdRegister", addresses=chip_sn_address)) self.register_utils.send_commands(commands) data = self.read_data() if data.shape[0] == 0: logging.error('Chip S/N: No data') return read_values = [] for index, word in enumerate(np.nditer(data)): fei4_data_word = FEI4Record(word, self.register.chip_flavor) if fei4_data_word == 'AR': fei4_next_data_word = FEI4Record(data[index + 1], self.register.chip_flavor) if fei4_next_data_word == 'VR': read_value = fei4_next_data_word['value'] read_values.append(read_value) # commands = [] # commands.extend(self.register.get_commands("RunMode")) # self.register_utils.send_commands(commands) if len(read_values) == 0: logging.error('No Chip S/N was found') elif len(read_values) == 1: logging.info('Chip S/N: %d', read_values[0]) else: logging.warning('Ambiguous Chip S/N: %s', read_values)
[ "def", "read_chip_sn", "(", "self", ")", ":", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "with", "self", ".", "readout", "(", "fill_buffer", "=", "True", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "if", "self", ".", "register", ".", "fei4b", ":", "commands", "=", "[", "]", "self", ".", "register", ".", "set_global_register_value", "(", "'Efuse_Sense'", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "'Efuse_Sense'", "]", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"GlobalPulse\"", ",", "Width", "=", "0", ")", ")", "self", ".", "register", ".", "set_global_register_value", "(", "'Efuse_Sense'", ",", "0", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "'Efuse_Sense'", "]", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "commands", "=", "[", "]", "self", ".", "register", ".", "set_global_register_value", "(", "'Conf_AddrEnable'", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "'Conf_AddrEnable'", "]", ")", ")", "chip_sn_address", "=", "self", ".", "register", ".", "get_global_register_attributes", "(", "\"addresses\"", ",", "name", "=", "\"Chip_SN\"", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"RdRegister\"", ",", "addresses", "=", "chip_sn_address", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "data", "=", "self", ".", "read_data", "(", ")", "if", "data", ".", "shape", "[", "0", "]", "==", "0", ":", "logging", ".", "error", "(", "'Chip S/N: No data'", ")", "return", "read_values", "=", "[", "]", "for", "index", ",", "word", "in", "enumerate", "(", "np", ".", "nditer", "(", "data", ")", ")", ":", "fei4_data_word", "=", "FEI4Record", "(", "word", ",", "self", ".", "register", ".", "chip_flavor", ")", "if", "fei4_data_word", "==", "'AR'", ":", "fei4_next_data_word", "=", "FEI4Record", "(", "data", "[", "index", "+", "1", "]", ",", "self", ".", "register", ".", "chip_flavor", ")", "if", "fei4_next_data_word", "==", "'VR'", ":", "read_value", "=", "fei4_next_data_word", "[", "'value'", "]", "read_values", ".", "append", "(", "read_value", ")", "# commands = []\r", "# commands.extend(self.register.get_commands(\"RunMode\"))\r", "# self.register_utils.send_commands(commands)\r", "if", "len", "(", "read_values", ")", "==", "0", ":", "logging", ".", "error", "(", "'No Chip S/N was found'", ")", "elif", "len", "(", "read_values", ")", "==", "1", ":", "logging", ".", "info", "(", "'Chip S/N: %d'", ",", "read_values", "[", "0", "]", ")", "else", ":", "logging", ".", "warning", "(", "'Ambiguous Chip S/N: %s'", ",", "read_values", ")" ]
Reading Chip S/N Note ---- Bits [MSB-LSB] | [15] | [14-6] | [5-0] Content | reserved | wafer number | chip number
[ "Reading", "Chip", "S", "/", "N", "Note", "----", "Bits", "[", "MSB", "-", "LSB", "]", "|", "[", "15", "]", "|", "[", "14", "-", "6", "]", "|", "[", "5", "-", "0", "]", "Content", "|", "reserved", "|", "wafer", "number", "|", "chip", "number" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L264-L313
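The bit layout quoted in the docstring above (bit 15 reserved, bits 14-6 wafer number, bits 5-0 chip number) can be decoded with two shifts and masks. The sketch below is a standalone illustration; the example serial number is made up.

def decode_chip_sn(serial_number):
    # bits [14-6]: wafer number (9 bits), bits [5-0]: chip number (6 bits), bit [15] reserved
    wafer_number = (serial_number >> 6) & 0x1FF
    chip_number = serial_number & 0x3F
    return wafer_number, chip_number

print(decode_chip_sn(0x0B03))  # hypothetical S/N -> (44, 3)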
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
read_global_register
def read_global_register(self, name, overwrite_config=False): '''The function reads the global register, interprets the data and returns the register value. Parameters ---------- name : register name overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- register value ''' self.register_utils.send_commands(self.register.get_commands("ConfMode")) with self.readout(fill_buffer=True, callback=None, errback=None): commands = [] commands.extend(self.register.get_commands("RdRegister", name=name)) self.register_utils.send_commands(commands) data = self.read_data() register_object = self.register.get_global_register_objects(name=[name])[0] value = BitLogic(register_object['addresses'] * 16) index = 0 vr_count = 0 for word in np.nditer(data): fei4_data_word = FEI4Record(word, self.register.chip_flavor) if fei4_data_word == 'AR': address_value = fei4_data_word['address'] if address_value != register_object['address'] + index: raise Exception('Unexpected address from Address Record: read: %d, expected: %d' % (address_value, register_object['address'] + index)) elif fei4_data_word == 'VR': vr_count += 1 if vr_count >= 2: raise RuntimeError("Read more than 2 value records") read_value = BitLogic.from_value(fei4_data_word['value'], size=16) if register_object['register_littleendian']: read_value.reverse() value[index * 16 + 15:index * 16] = read_value index += 1 value = value[register_object['bitlength'] + register_object['offset'] - 1:register_object['offset']] if register_object['littleendian']: value.reverse() value = value.tovalue() if overwrite_config: self.register.set_global_register(name, value) return value
python
def read_global_register(self, name, overwrite_config=False): '''The function reads the global register, interprets the data and returns the register value. Parameters ---------- name : register name overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- register value ''' self.register_utils.send_commands(self.register.get_commands("ConfMode")) with self.readout(fill_buffer=True, callback=None, errback=None): commands = [] commands.extend(self.register.get_commands("RdRegister", name=name)) self.register_utils.send_commands(commands) data = self.read_data() register_object = self.register.get_global_register_objects(name=[name])[0] value = BitLogic(register_object['addresses'] * 16) index = 0 vr_count = 0 for word in np.nditer(data): fei4_data_word = FEI4Record(word, self.register.chip_flavor) if fei4_data_word == 'AR': address_value = fei4_data_word['address'] if address_value != register_object['address'] + index: raise Exception('Unexpected address from Address Record: read: %d, expected: %d' % (address_value, register_object['address'] + index)) elif fei4_data_word == 'VR': vr_count += 1 if vr_count >= 2: raise RuntimeError("Read more than 2 value records") read_value = BitLogic.from_value(fei4_data_word['value'], size=16) if register_object['register_littleendian']: read_value.reverse() value[index * 16 + 15:index * 16] = read_value index += 1 value = value[register_object['bitlength'] + register_object['offset'] - 1:register_object['offset']] if register_object['littleendian']: value.reverse() value = value.tovalue() if overwrite_config: self.register.set_global_register(name, value) return value
[ "def", "read_global_register", "(", "self", ",", "name", ",", "overwrite_config", "=", "False", ")", ":", "self", ".", "register_utils", ".", "send_commands", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "with", "self", ".", "readout", "(", "fill_buffer", "=", "True", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"RdRegister\"", ",", "name", "=", "name", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "data", "=", "self", ".", "read_data", "(", ")", "register_object", "=", "self", ".", "register", ".", "get_global_register_objects", "(", "name", "=", "[", "name", "]", ")", "[", "0", "]", "value", "=", "BitLogic", "(", "register_object", "[", "'addresses'", "]", "*", "16", ")", "index", "=", "0", "vr_count", "=", "0", "for", "word", "in", "np", ".", "nditer", "(", "data", ")", ":", "fei4_data_word", "=", "FEI4Record", "(", "word", ",", "self", ".", "register", ".", "chip_flavor", ")", "if", "fei4_data_word", "==", "'AR'", ":", "address_value", "=", "fei4_data_word", "[", "'address'", "]", "if", "address_value", "!=", "register_object", "[", "'address'", "]", "+", "index", ":", "raise", "Exception", "(", "'Unexpected address from Address Record: read: %d, expected: %d'", "%", "(", "address_value", ",", "register_object", "[", "'address'", "]", "+", "index", ")", ")", "elif", "fei4_data_word", "==", "'VR'", ":", "vr_count", "+=", "1", "if", "vr_count", ">=", "2", ":", "raise", "RuntimeError", "(", "\"Read more than 2 value records\"", ")", "read_value", "=", "BitLogic", ".", "from_value", "(", "fei4_data_word", "[", "'value'", "]", ",", "size", "=", "16", ")", "if", "register_object", "[", "'register_littleendian'", "]", ":", "read_value", ".", "reverse", "(", ")", "value", "[", "index", "*", "16", "+", "15", ":", "index", "*", "16", "]", "=", "read_value", "index", "+=", "1", "value", "=", "value", "[", "register_object", "[", "'bitlength'", "]", "+", "register_object", "[", "'offset'", "]", "-", "1", ":", "register_object", "[", "'offset'", "]", "]", "if", "register_object", "[", "'littleendian'", "]", ":", "value", ".", "reverse", "(", ")", "value", "=", "value", ".", "tovalue", "(", ")", "if", "overwrite_config", ":", "self", ".", "register", ".", "set_global_register", "(", "name", ",", "value", ")", "return", "value" ]
The function reads the global register, interprets the data and returns the register value. Parameters ---------- name : register name overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- register value
[ "The", "function", "reads", "the", "global", "register", "interprets", "the", "data", "and", "returns", "the", "register", "value", ".", "Parameters", "----------", "name", ":", "register", "name", "overwrite_config", ":", "bool", "The", "read", "values", "overwrite", "the", "config", "in", "RAM", "if", "true", ".", "Returns", "-------", "register", "value" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L540-L586
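The value assembly above relies on BitLogic; the same idea can be shown with plain integers. A simplified, self-contained analogue (it ignores the little-endian bit reversals that BitLogic handles in the real code); assemble_register is an illustrative helper and the numbers are made up:

# Illustrative analogue, not the pyBAR implementation.
# Each Value Record contributes one 16-bit word per register address; the
# register field is then cut out at its bit offset and length.
def assemble_register(words, offset, bitlength):
    value = 0
    for index, word in enumerate(words):
        value |= (word & 0xFFFF) << (16 * index)
    return (value >> offset) & ((1 << bitlength) - 1)

print(hex(assemble_register([0x1234, 0x00AB], offset=4, bitlength=12)))  # 0x123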
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
read_pixel_register
def read_pixel_register(self, pix_regs=None, dcs=range(40), overwrite_config=False): '''The function reads the pixel register, interprets the data and returns a masked numpy arrays with the data for the chosen pixel register. Pixels without any data are masked. Parameters ---------- pix_regs : iterable, string List of pixel register to read (e.g. Enable, C_High, ...). If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC" dcs : iterable, int List of double columns to read. overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- list of masked numpy.ndarrays ''' if pix_regs is None: pix_regs = ["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"] self.register_utils.send_commands(self.register.get_commands("ConfMode")) result = [] for pix_reg in pix_regs: pixel_data = np.ma.masked_array(np.zeros(shape=(80, 336), dtype=np.uint32), mask=True) # the result pixel array, only pixel with data are not masked for dc in dcs: with self.readout(fill_buffer=True, callback=None, errback=None): self.register_utils.send_commands(self.register.get_commands("RdFrontEnd", name=[pix_reg], dcs=[dc])) data = self.read_data() interpret_pixel_data(data, dc, pixel_data, invert=False if pix_reg == "EnableDigInj" else True) if overwrite_config: self.register.set_pixel_register(pix_reg, pixel_data.data) result.append(pixel_data) return result
python
def read_pixel_register(self, pix_regs=None, dcs=range(40), overwrite_config=False): '''The function reads the pixel register, interprets the data and returns a masked numpy arrays with the data for the chosen pixel register. Pixels without any data are masked. Parameters ---------- pix_regs : iterable, string List of pixel register to read (e.g. Enable, C_High, ...). If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC" dcs : iterable, int List of double columns to read. overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- list of masked numpy.ndarrays ''' if pix_regs is None: pix_regs = ["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"] self.register_utils.send_commands(self.register.get_commands("ConfMode")) result = [] for pix_reg in pix_regs: pixel_data = np.ma.masked_array(np.zeros(shape=(80, 336), dtype=np.uint32), mask=True) # the result pixel array, only pixel with data are not masked for dc in dcs: with self.readout(fill_buffer=True, callback=None, errback=None): self.register_utils.send_commands(self.register.get_commands("RdFrontEnd", name=[pix_reg], dcs=[dc])) data = self.read_data() interpret_pixel_data(data, dc, pixel_data, invert=False if pix_reg == "EnableDigInj" else True) if overwrite_config: self.register.set_pixel_register(pix_reg, pixel_data.data) result.append(pixel_data) return result
[ "def", "read_pixel_register", "(", "self", ",", "pix_regs", "=", "None", ",", "dcs", "=", "range", "(", "40", ")", ",", "overwrite_config", "=", "False", ")", ":", "if", "pix_regs", "is", "None", ":", "pix_regs", "=", "[", "\"EnableDigInj\"", ",", "\"Imon\"", ",", "\"Enable\"", ",", "\"C_High\"", ",", "\"C_Low\"", ",", "\"TDAC\"", ",", "\"FDAC\"", "]", "self", ".", "register_utils", ".", "send_commands", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "result", "=", "[", "]", "for", "pix_reg", "in", "pix_regs", ":", "pixel_data", "=", "np", ".", "ma", ".", "masked_array", "(", "np", ".", "zeros", "(", "shape", "=", "(", "80", ",", "336", ")", ",", "dtype", "=", "np", ".", "uint32", ")", ",", "mask", "=", "True", ")", "# the result pixel array, only pixel with data are not masked\r", "for", "dc", "in", "dcs", ":", "with", "self", ".", "readout", "(", "fill_buffer", "=", "True", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "self", ".", "register_utils", ".", "send_commands", "(", "self", ".", "register", ".", "get_commands", "(", "\"RdFrontEnd\"", ",", "name", "=", "[", "pix_reg", "]", ",", "dcs", "=", "[", "dc", "]", ")", ")", "data", "=", "self", ".", "read_data", "(", ")", "interpret_pixel_data", "(", "data", ",", "dc", ",", "pixel_data", ",", "invert", "=", "False", "if", "pix_reg", "==", "\"EnableDigInj\"", "else", "True", ")", "if", "overwrite_config", ":", "self", ".", "register", ".", "set_pixel_register", "(", "pix_reg", ",", "pixel_data", ".", "data", ")", "result", ".", "append", "(", "pixel_data", ")", "return", "result" ]
The function reads the pixel register, interprets the data and returns a masked numpy arrays with the data for the chosen pixel register. Pixels without any data are masked. Parameters ---------- pix_regs : iterable, string List of pixel register to read (e.g. Enable, C_High, ...). If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC" dcs : iterable, int List of double columns to read. overwrite_config : bool The read values overwrite the config in RAM if true. Returns ------- list of masked numpy.ndarrays
[ "The", "function", "reads", "the", "pixel", "register", "interprets", "the", "data", "and", "returns", "a", "masked", "numpy", "arrays", "with", "the", "data", "for", "the", "chosen", "pixel", "register", ".", "Pixels", "without", "any", "data", "are", "masked", ".", "Parameters", "----------", "pix_regs", ":", "iterable", "string", "List", "of", "pixel", "register", "to", "read", "(", "e", ".", "g", ".", "Enable", "C_High", "...", ")", ".", "If", "None", "all", "are", "read", ":", "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC", "dcs", ":", "iterable", "int", "List", "of", "double", "columns", "to", "read", ".", "overwrite_config", ":", "bool", "The", "read", "values", "overwrite", "the", "config", "in", "RAM", "if", "true", ".", "Returns", "-------", "list", "of", "masked", "numpy", ".", "ndarrays" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L598-L633
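read_pixel_register() returns masked (80, 336) arrays in which pixels without read-back data stay masked. A self-contained sketch with synthetic data showing how such a result can be summarized without any hardware; the register values are made up:

import numpy as np

# Emulate one returned register map: everything masked except double column 0.
tdac = np.ma.masked_array(np.zeros((80, 336), dtype=np.uint32), mask=True)
tdac[0:2, :] = 7                 # assigning also unmasks these pixels

print(tdac.count())              # 672 pixels carry data (2 columns x 336 rows)
print(tdac.mean())               # 7.0, mean over the unmasked pixels only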
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
is_fe_ready
def is_fe_ready(self): '''Get FEI4 status of module. If FEI4 is not ready, resetting service records is necessary to bring the FEI4 to a defined state. Returns ------- value : bool True if FEI4 is ready, False if the FEI4 was powered up recently and is not ready. ''' with self.readout(fill_buffer=True, callback=None, errback=None): commands = [] commands.extend(self.register.get_commands("ConfMode")) commands.extend(self.register.get_commands("RdRegister", address=[1])) # commands.extend(self.register.get_commands("RunMode")) self.register_utils.send_commands(commands) data = self.read_data() if len(data) != 0: return True if FEI4Record(data[-1], self.register.chip_flavor) == 'VR' else False else: return False
python
def is_fe_ready(self): '''Get FEI4 status of module. If FEI4 is not ready, resetting service records is necessary to bring the FEI4 to a defined state. Returns ------- value : bool True if FEI4 is ready, False if the FEI4 was powered up recently and is not ready. ''' with self.readout(fill_buffer=True, callback=None, errback=None): commands = [] commands.extend(self.register.get_commands("ConfMode")) commands.extend(self.register.get_commands("RdRegister", address=[1])) # commands.extend(self.register.get_commands("RunMode")) self.register_utils.send_commands(commands) data = self.read_data() if len(data) != 0: return True if FEI4Record(data[-1], self.register.chip_flavor) == 'VR' else False else: return False
[ "def", "is_fe_ready", "(", "self", ")", ":", "with", "self", ".", "readout", "(", "fill_buffer", "=", "True", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"RdRegister\"", ",", "address", "=", "[", "1", "]", ")", ")", "# commands.extend(self.register.get_commands(\"RunMode\"))\r", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "data", "=", "self", ".", "read_data", "(", ")", "if", "len", "(", "data", ")", "!=", "0", ":", "return", "True", "if", "FEI4Record", "(", "data", "[", "-", "1", "]", ",", "self", ".", "register", ".", "chip_flavor", ")", "==", "'VR'", "else", "False", "else", ":", "return", "False" ]
Get FEI4 status of module. If FEI4 is not ready, resetting service records is necessary to bring the FEI4 to a defined state. Returns ------- value : bool True if FEI4 is ready, False if the FEI4 was powered up recently and is not ready.
[ "Get", "FEI4", "status", "of", "module", ".", "If", "FEI4", "is", "not", "ready", "resetting", "service", "records", "is", "necessary", "to", "bring", "the", "FEI4", "to", "a", "defined", "state", ".", "Returns", "-------", "value", ":", "bool", "True", "if", "FEI4", "is", "ready", "False", "if", "the", "FEI4", "was", "powered", "up", "recently", "and", "is", "not", "ready", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L636-L657
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
invert_pixel_mask
def invert_pixel_mask(mask): '''Invert pixel mask (0->1, 1(and greater)->0). Parameters ---------- mask : array-like Mask. Returns ------- inverted_mask : array-like Inverted Mask. ''' inverted_mask = np.ones(shape=(80, 336), dtype=np.dtype('>u1')) inverted_mask[mask >= 1] = 0 return inverted_mask
python
def invert_pixel_mask(mask): '''Invert pixel mask (0->1, 1(and greater)->0). Parameters ---------- mask : array-like Mask. Returns ------- inverted_mask : array-like Inverted Mask. ''' inverted_mask = np.ones(shape=(80, 336), dtype=np.dtype('>u1')) inverted_mask[mask >= 1] = 0 return inverted_mask
[ "def", "invert_pixel_mask", "(", "mask", ")", ":", "inverted_mask", "=", "np", ".", "ones", "(", "shape", "=", "(", "80", ",", "336", ")", ",", "dtype", "=", "np", ".", "dtype", "(", "'>u1'", ")", ")", "inverted_mask", "[", "mask", ">=", "1", "]", "=", "0", "return", "inverted_mask" ]
Invert pixel mask (0->1, 1(and greater)->0). Parameters ---------- mask : array-like Mask. Returns ------- inverted_mask : array-like Inverted Mask.
[ "Invert", "pixel", "mask", "(", "0", "-", ">", "1", "1", "(", "and", "greater", ")", "-", ">", "0", ")", ".", "Parameters", "----------", "mask", ":", "array", "-", "like", "Mask", ".", "Returns", "-------", "inverted_mask", ":", "array", "-", "like", "Inverted", "Mask", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L660-L675
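A short usage sketch for invert_pixel_mask(); it assumes pyBAR and its dependencies are installed so that pybar.fei4.register_utils (the module these records come from) can be imported:

import numpy as np
from pybar.fei4.register_utils import invert_pixel_mask

enable = np.zeros((80, 336), dtype=np.uint8)
enable[10, 100] = 1                  # one enabled pixel (arbitrary example)
veto = invert_pixel_mask(enable)     # 0 -> 1, anything >= 1 -> 0
print(veto.sum())                    # 26879, i.e. 80 * 336 - 1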
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
make_pixel_mask
def make_pixel_mask(steps, shift, default=0, value=1, enable_columns=None, mask=None): '''Generate pixel mask. Parameters ---------- steps : int Number of mask steps, e.g. steps=3 (every third pixel is enabled), steps=336 (one pixel per column), steps=672 (one pixel per double column). shift : int Shift mask by given value to the bottom (towards higher row numbers). From 0 to (steps - 1). default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. enable_columns : list List of columns where the shift mask will be applied. List elements can range from 1 to 80. mask : array_like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked (i.e. invalid) data. Masked pixels will be set to default value. Returns ------- mask_array : numpy.ndarray Mask array. Usage ----- shift_mask = 'enable' steps = 3 # three step mask for mask_step in range(steps): commands = [] commands.extend(self.register.get_commands("ConfMode")) mask_array = make_pixel_mask(steps=steps, step=mask_step) self.register.set_pixel_register_value(shift_mask, mask_array) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=shift_mask)) self.register_utils.send_commands(commands) # do something here ''' shape = (80, 336) # value = np.zeros(dimension, dtype = np.uint8) mask_array = np.full(shape, default, dtype=np.uint8) # FE columns and rows are starting from 1 if enable_columns: odd_columns = [odd - 1 for odd in enable_columns if odd % 2 != 0] even_columns = [even - 1 for even in enable_columns if even % 2 == 0] else: odd_columns = range(0, 80, 2) even_columns = range(1, 80, 2) odd_rows = np.arange(shift % steps, 336, steps) even_row_offset = ((steps // 2) + shift) % steps # // integer devision even_rows = np.arange(even_row_offset, 336, steps) if odd_columns: odd_col_rows = itertools.product(odd_columns, odd_rows) # get any combination of column and row, no for loop needed for odd_col_row in odd_col_rows: mask_array[odd_col_row[0], odd_col_row[1]] = value # advanced indexing if even_columns: even_col_rows = itertools.product(even_columns, even_rows) for even_col_row in even_col_rows: mask_array[even_col_row[0], even_col_row[1]] = value if mask is not None: mask_array = np.ma.array(mask_array, mask=mask, fill_value=default) mask_array = mask_array.filled() return mask_array
python
def make_pixel_mask(steps, shift, default=0, value=1, enable_columns=None, mask=None): '''Generate pixel mask. Parameters ---------- steps : int Number of mask steps, e.g. steps=3 (every third pixel is enabled), steps=336 (one pixel per column), steps=672 (one pixel per double column). shift : int Shift mask by given value to the bottom (towards higher row numbers). From 0 to (steps - 1). default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. enable_columns : list List of columns where the shift mask will be applied. List elements can range from 1 to 80. mask : array_like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked (i.e. invalid) data. Masked pixels will be set to default value. Returns ------- mask_array : numpy.ndarray Mask array. Usage ----- shift_mask = 'enable' steps = 3 # three step mask for mask_step in range(steps): commands = [] commands.extend(self.register.get_commands("ConfMode")) mask_array = make_pixel_mask(steps=steps, step=mask_step) self.register.set_pixel_register_value(shift_mask, mask_array) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=shift_mask)) self.register_utils.send_commands(commands) # do something here ''' shape = (80, 336) # value = np.zeros(dimension, dtype = np.uint8) mask_array = np.full(shape, default, dtype=np.uint8) # FE columns and rows are starting from 1 if enable_columns: odd_columns = [odd - 1 for odd in enable_columns if odd % 2 != 0] even_columns = [even - 1 for even in enable_columns if even % 2 == 0] else: odd_columns = range(0, 80, 2) even_columns = range(1, 80, 2) odd_rows = np.arange(shift % steps, 336, steps) even_row_offset = ((steps // 2) + shift) % steps # // integer devision even_rows = np.arange(even_row_offset, 336, steps) if odd_columns: odd_col_rows = itertools.product(odd_columns, odd_rows) # get any combination of column and row, no for loop needed for odd_col_row in odd_col_rows: mask_array[odd_col_row[0], odd_col_row[1]] = value # advanced indexing if even_columns: even_col_rows = itertools.product(even_columns, even_rows) for even_col_row in even_col_rows: mask_array[even_col_row[0], even_col_row[1]] = value if mask is not None: mask_array = np.ma.array(mask_array, mask=mask, fill_value=default) mask_array = mask_array.filled() return mask_array
[ "def", "make_pixel_mask", "(", "steps", ",", "shift", ",", "default", "=", "0", ",", "value", "=", "1", ",", "enable_columns", "=", "None", ",", "mask", "=", "None", ")", ":", "shape", "=", "(", "80", ",", "336", ")", "# value = np.zeros(dimension, dtype = np.uint8)\r", "mask_array", "=", "np", ".", "full", "(", "shape", ",", "default", ",", "dtype", "=", "np", ".", "uint8", ")", "# FE columns and rows are starting from 1\r", "if", "enable_columns", ":", "odd_columns", "=", "[", "odd", "-", "1", "for", "odd", "in", "enable_columns", "if", "odd", "%", "2", "!=", "0", "]", "even_columns", "=", "[", "even", "-", "1", "for", "even", "in", "enable_columns", "if", "even", "%", "2", "==", "0", "]", "else", ":", "odd_columns", "=", "range", "(", "0", ",", "80", ",", "2", ")", "even_columns", "=", "range", "(", "1", ",", "80", ",", "2", ")", "odd_rows", "=", "np", ".", "arange", "(", "shift", "%", "steps", ",", "336", ",", "steps", ")", "even_row_offset", "=", "(", "(", "steps", "//", "2", ")", "+", "shift", ")", "%", "steps", "# // integer devision\r", "even_rows", "=", "np", ".", "arange", "(", "even_row_offset", ",", "336", ",", "steps", ")", "if", "odd_columns", ":", "odd_col_rows", "=", "itertools", ".", "product", "(", "odd_columns", ",", "odd_rows", ")", "# get any combination of column and row, no for loop needed\r", "for", "odd_col_row", "in", "odd_col_rows", ":", "mask_array", "[", "odd_col_row", "[", "0", "]", ",", "odd_col_row", "[", "1", "]", "]", "=", "value", "# advanced indexing\r", "if", "even_columns", ":", "even_col_rows", "=", "itertools", ".", "product", "(", "even_columns", ",", "even_rows", ")", "for", "even_col_row", "in", "even_col_rows", ":", "mask_array", "[", "even_col_row", "[", "0", "]", ",", "even_col_row", "[", "1", "]", "]", "=", "value", "if", "mask", "is", "not", "None", ":", "mask_array", "=", "np", ".", "ma", ".", "array", "(", "mask_array", ",", "mask", "=", "mask", ",", "fill_value", "=", "default", ")", "mask_array", "=", "mask_array", ".", "filled", "(", ")", "return", "mask_array" ]
Generate pixel mask. Parameters ---------- steps : int Number of mask steps, e.g. steps=3 (every third pixel is enabled), steps=336 (one pixel per column), steps=672 (one pixel per double column). shift : int Shift mask by given value to the bottom (towards higher row numbers). From 0 to (steps - 1). default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. enable_columns : list List of columns where the shift mask will be applied. List elements can range from 1 to 80. mask : array_like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked (i.e. invalid) data. Masked pixels will be set to default value. Returns ------- mask_array : numpy.ndarray Mask array. Usage ----- shift_mask = 'enable' steps = 3 # three step mask for mask_step in range(steps): commands = [] commands.extend(self.register.get_commands("ConfMode")) mask_array = make_pixel_mask(steps=steps, step=mask_step) self.register.set_pixel_register_value(shift_mask, mask_array) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=shift_mask)) self.register_utils.send_commands(commands) # do something here
[ "Generate", "pixel", "mask", ".", "Parameters", "----------", "steps", ":", "int", "Number", "of", "mask", "steps", "e", ".", "g", ".", "steps", "=", "3", "(", "every", "third", "pixel", "is", "enabled", ")", "steps", "=", "336", "(", "one", "pixel", "per", "column", ")", "steps", "=", "672", "(", "one", "pixel", "per", "double", "column", ")", ".", "shift", ":", "int", "Shift", "mask", "by", "given", "value", "to", "the", "bottom", "(", "towards", "higher", "row", "numbers", ")", ".", "From", "0", "to", "(", "steps", "-", "1", ")", ".", "default", ":", "int", "Value", "of", "pixels", "that", "are", "not", "selected", "by", "the", "mask", ".", "value", ":", "int", "Value", "of", "pixels", "that", "are", "selected", "by", "the", "mask", ".", "enable_columns", ":", "list", "List", "of", "columns", "where", "the", "shift", "mask", "will", "be", "applied", ".", "List", "elements", "can", "range", "from", "1", "to", "80", ".", "mask", ":", "array_like", "Additional", "mask", ".", "Must", "be", "convertible", "to", "an", "array", "of", "booleans", "with", "the", "same", "shape", "as", "mask", "array", ".", "True", "indicates", "a", "masked", "(", "i", ".", "e", ".", "invalid", ")", "data", ".", "Masked", "pixels", "will", "be", "set", "to", "default", "value", ".", "Returns", "-------", "mask_array", ":", "numpy", ".", "ndarray", "Mask", "array", ".", "Usage", "-----", "shift_mask", "=", "enable", "steps", "=", "3", "#", "three", "step", "mask", "for", "mask_step", "in", "range", "(", "steps", ")", ":", "commands", "=", "[]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "ConfMode", "))", "mask_array", "=", "make_pixel_mask", "(", "steps", "=", "steps", "step", "=", "mask_step", ")", "self", ".", "register", ".", "set_pixel_register_value", "(", "shift_mask", "mask_array", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "WrFrontEnd", "same_mask_for_all_dc", "=", "True", "name", "=", "shift_mask", "))", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "#", "do", "something", "here" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L678-L738
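A usage sketch for make_pixel_mask(), again assuming pyBAR is importable. With steps=3 the three shift values are expected to enable every pixel exactly once, which the check below verifies:

import numpy as np
from pybar.fei4.register_utils import make_pixel_mask

total = np.zeros((80, 336), dtype=np.uint32)
for shift in range(3):
    total += make_pixel_mask(steps=3, shift=shift)

print(np.all(total == 1))  # True: each pixel is selected in exactly one step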
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
make_pixel_mask_from_col_row
def make_pixel_mask_from_col_row(column, row, default=0, value=1): '''Generate mask from column and row lists Parameters ---------- column : iterable, int List of colums values. row : iterable, int List of row values. default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. Returns ------- mask : numpy.ndarray ''' # FE columns and rows start from 1 col_array = np.array(column) - 1 row_array = np.array(row) - 1 if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0): raise ValueError('Column and/or row out of range') shape = (80, 336) mask = np.full(shape, default, dtype=np.uint8) mask[col_array, row_array] = value # advanced indexing return mask
python
def make_pixel_mask_from_col_row(column, row, default=0, value=1): '''Generate mask from column and row lists Parameters ---------- column : iterable, int List of colums values. row : iterable, int List of row values. default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. Returns ------- mask : numpy.ndarray ''' # FE columns and rows start from 1 col_array = np.array(column) - 1 row_array = np.array(row) - 1 if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0): raise ValueError('Column and/or row out of range') shape = (80, 336) mask = np.full(shape, default, dtype=np.uint8) mask[col_array, row_array] = value # advanced indexing return mask
[ "def", "make_pixel_mask_from_col_row", "(", "column", ",", "row", ",", "default", "=", "0", ",", "value", "=", "1", ")", ":", "# FE columns and rows start from 1\r", "col_array", "=", "np", ".", "array", "(", "column", ")", "-", "1", "row_array", "=", "np", ".", "array", "(", "row", ")", "-", "1", "if", "np", ".", "any", "(", "col_array", ">=", "80", ")", "or", "np", ".", "any", "(", "col_array", "<", "0", ")", "or", "np", ".", "any", "(", "row_array", ">=", "336", ")", "or", "np", ".", "any", "(", "row_array", "<", "0", ")", ":", "raise", "ValueError", "(", "'Column and/or row out of range'", ")", "shape", "=", "(", "80", ",", "336", ")", "mask", "=", "np", ".", "full", "(", "shape", ",", "default", ",", "dtype", "=", "np", ".", "uint8", ")", "mask", "[", "col_array", ",", "row_array", "]", "=", "value", "# advanced indexing\r", "return", "mask" ]
Generate mask from column and row lists Parameters ---------- column : iterable, int List of column values. row : iterable, int List of row values. default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. Returns ------- mask : numpy.ndarray
[ "Generate", "mask", "from", "column", "and", "row", "lists", "Parameters", "----------", "column", ":", "iterable", "int", "List", "of", "colums", "values", ".", "row", ":", "iterable", "int", "List", "of", "row", "values", ".", "default", ":", "int", "Value", "of", "pixels", "that", "are", "not", "selected", "by", "the", "mask", ".", "value", ":", "int", "Value", "of", "pixels", "that", "are", "selected", "by", "the", "mask", ".", "Returns", "-------", "mask", ":", "numpy", ".", "ndarray" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L741-L767
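A usage sketch for make_pixel_mask_from_col_row(), assuming pyBAR is importable; note that the column/row coordinates are 1-based, as in the FE-I4 convention used above. The coordinates below are arbitrary example values:

from pybar.fei4.register_utils import make_pixel_mask_from_col_row

mask = make_pixel_mask_from_col_row(column=[1, 40, 80], row=[1, 168, 336])
print(mask.sum())  # 3 selected pixels on the (80, 336) array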
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
make_box_pixel_mask_from_col_row
def make_box_pixel_mask_from_col_row(column, row, default=0, value=1): '''Generate box shaped mask from column and row lists. Takes the minimum and maximum value from each list. Parameters ---------- column : iterable, int List of colums values. row : iterable, int List of row values. default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. Returns ------- numpy.ndarray ''' # FE columns and rows start from 1 col_array = np.array(column) - 1 row_array = np.array(row) - 1 if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0): raise ValueError('Column and/or row out of range') shape = (80, 336) mask = np.full(shape, default, dtype=np.uint8) if column and row: mask[col_array.min():col_array.max() + 1, row_array.min():row_array.max() + 1] = value # advanced indexing return mask
python
def make_box_pixel_mask_from_col_row(column, row, default=0, value=1): '''Generate box shaped mask from column and row lists. Takes the minimum and maximum value from each list. Parameters ---------- column : iterable, int List of colums values. row : iterable, int List of row values. default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. Returns ------- numpy.ndarray ''' # FE columns and rows start from 1 col_array = np.array(column) - 1 row_array = np.array(row) - 1 if np.any(col_array >= 80) or np.any(col_array < 0) or np.any(row_array >= 336) or np.any(row_array < 0): raise ValueError('Column and/or row out of range') shape = (80, 336) mask = np.full(shape, default, dtype=np.uint8) if column and row: mask[col_array.min():col_array.max() + 1, row_array.min():row_array.max() + 1] = value # advanced indexing return mask
[ "def", "make_box_pixel_mask_from_col_row", "(", "column", ",", "row", ",", "default", "=", "0", ",", "value", "=", "1", ")", ":", "# FE columns and rows start from 1\r", "col_array", "=", "np", ".", "array", "(", "column", ")", "-", "1", "row_array", "=", "np", ".", "array", "(", "row", ")", "-", "1", "if", "np", ".", "any", "(", "col_array", ">=", "80", ")", "or", "np", ".", "any", "(", "col_array", "<", "0", ")", "or", "np", ".", "any", "(", "row_array", ">=", "336", ")", "or", "np", ".", "any", "(", "row_array", "<", "0", ")", ":", "raise", "ValueError", "(", "'Column and/or row out of range'", ")", "shape", "=", "(", "80", ",", "336", ")", "mask", "=", "np", ".", "full", "(", "shape", ",", "default", ",", "dtype", "=", "np", ".", "uint8", ")", "if", "column", "and", "row", ":", "mask", "[", "col_array", ".", "min", "(", ")", ":", "col_array", ".", "max", "(", ")", "+", "1", ",", "row_array", ".", "min", "(", ")", ":", "row_array", ".", "max", "(", ")", "+", "1", "]", "=", "value", "# advanced indexing\r", "return", "mask" ]
Generate box shaped mask from column and row lists. Takes the minimum and maximum value from each list. Parameters ---------- column : iterable, int List of column values. row : iterable, int List of row values. default : int Value of pixels that are not selected by the mask. value : int Value of pixels that are selected by the mask. Returns ------- numpy.ndarray
[ "Generate", "box", "shaped", "mask", "from", "column", "and", "row", "lists", ".", "Takes", "the", "minimum", "and", "maximum", "value", "from", "each", "list", ".", "Parameters", "----------", "column", ":", "iterable", "int", "List", "of", "colums", "values", ".", "row", ":", "iterable", "int", "List", "of", "row", "values", ".", "default", ":", "int", "Value", "of", "pixels", "that", "are", "not", "selected", "by", "the", "mask", ".", "value", ":", "int", "Value", "of", "pixels", "that", "are", "selected", "by", "the", "mask", ".", "Returns", "-------", "numpy", ".", "ndarray" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L770-L797
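A usage sketch for make_box_pixel_mask_from_col_row(), assuming pyBAR is importable. Only the minimum and maximum of each list matter; the corner coordinates below are arbitrary example values:

from pybar.fei4.register_utils import make_box_pixel_mask_from_col_row

box = make_box_pixel_mask_from_col_row(column=[10, 20], row=[100, 200])
print(box.sum())  # 1111 pixels: an 11 x 101 box (columns 10-20, rows 100-200)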
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
make_xtalk_mask
def make_xtalk_mask(mask): """ Generate xtalk mask (row - 1, row + 1) from pixel mask. Parameters ---------- mask : ndarray Pixel mask. Returns ------- ndarray Xtalk mask. Example ------- Input: [[1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] ... [1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0]] Output: [[0 1 0 0 0 1 0 1 0 0 ... 0 0 0 1 0 1 0 0 0 1] [0 0 1 0 1 0 0 0 1 0 ... 1 0 1 0 0 0 1 0 1 0] ... [0 1 0 0 0 1 0 1 0 0 ... 0 0 0 1 0 1 0 0 0 1] [0 0 1 0 1 0 0 0 1 0 ... 1 0 1 0 0 0 1 0 1 0]] """ col, row = mask.nonzero() row_plus_one = row + 1 del_index = np.where(row_plus_one > 335) row_plus_one = np.delete(row_plus_one, del_index) col_plus_one = np.delete(col.copy(), del_index) row_minus_one = row - 1 del_index = np.where(row_minus_one > 335) row_minus_one = np.delete(row_minus_one, del_index) col_minus_one = np.delete(col.copy(), del_index) col = np.concatenate((col_plus_one, col_minus_one)) row = np.concatenate((row_plus_one, row_minus_one)) return make_pixel_mask_from_col_row(col + 1, row + 1)
python
def make_xtalk_mask(mask): """ Generate xtalk mask (row - 1, row + 1) from pixel mask. Parameters ---------- mask : ndarray Pixel mask. Returns ------- ndarray Xtalk mask. Example ------- Input: [[1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] ... [1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0]] Output: [[0 1 0 0 0 1 0 1 0 0 ... 0 0 0 1 0 1 0 0 0 1] [0 0 1 0 1 0 0 0 1 0 ... 1 0 1 0 0 0 1 0 1 0] ... [0 1 0 0 0 1 0 1 0 0 ... 0 0 0 1 0 1 0 0 0 1] [0 0 1 0 1 0 0 0 1 0 ... 1 0 1 0 0 0 1 0 1 0]] """ col, row = mask.nonzero() row_plus_one = row + 1 del_index = np.where(row_plus_one > 335) row_plus_one = np.delete(row_plus_one, del_index) col_plus_one = np.delete(col.copy(), del_index) row_minus_one = row - 1 del_index = np.where(row_minus_one > 335) row_minus_one = np.delete(row_minus_one, del_index) col_minus_one = np.delete(col.copy(), del_index) col = np.concatenate((col_plus_one, col_minus_one)) row = np.concatenate((row_plus_one, row_minus_one)) return make_pixel_mask_from_col_row(col + 1, row + 1)
[ "def", "make_xtalk_mask", "(", "mask", ")", ":", "col", ",", "row", "=", "mask", ".", "nonzero", "(", ")", "row_plus_one", "=", "row", "+", "1", "del_index", "=", "np", ".", "where", "(", "row_plus_one", ">", "335", ")", "row_plus_one", "=", "np", ".", "delete", "(", "row_plus_one", ",", "del_index", ")", "col_plus_one", "=", "np", ".", "delete", "(", "col", ".", "copy", "(", ")", ",", "del_index", ")", "row_minus_one", "=", "row", "-", "1", "del_index", "=", "np", ".", "where", "(", "row_minus_one", ">", "335", ")", "row_minus_one", "=", "np", ".", "delete", "(", "row_minus_one", ",", "del_index", ")", "col_minus_one", "=", "np", ".", "delete", "(", "col", ".", "copy", "(", ")", ",", "del_index", ")", "col", "=", "np", ".", "concatenate", "(", "(", "col_plus_one", ",", "col_minus_one", ")", ")", "row", "=", "np", ".", "concatenate", "(", "(", "row_plus_one", ",", "row_minus_one", ")", ")", "return", "make_pixel_mask_from_col_row", "(", "col", "+", "1", ",", "row", "+", "1", ")" ]
Generate xtalk mask (row - 1, row + 1) from pixel mask. Parameters ---------- mask : ndarray Pixel mask. Returns ------- ndarray Xtalk mask. Example ------- Input: [[1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] ... [1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0]] Output: [[0 1 0 0 0 1 0 1 0 0 ... 0 0 0 1 0 1 0 0 0 1] [0 0 1 0 1 0 0 0 1 0 ... 1 0 1 0 0 0 1 0 1 0] ... [0 1 0 0 0 1 0 1 0 0 ... 0 0 0 1 0 1 0 0 0 1] [0 0 1 0 1 0 0 0 1 0 ... 1 0 1 0 0 0 1 0 1 0]]
[ "Generate", "xtalk", "mask", "(", "row", "-", "1", "row", "+", "1", ")", "from", "pixel", "mask", ".", "Parameters", "----------", "mask", ":", "ndarray", "Pixel", "mask", ".", "Returns", "-------", "ndarray", "Xtalk", "mask", ".", "Example", "-------", "Input", ":", "[[", "1", "0", "0", "0", "0", "0", "1", "0", "0", "0", "...", "0", "0", "0", "0", "1", "0", "0", "0", "0", "0", "]", "[", "0", "0", "0", "1", "0", "0", "0", "0", "0", "1", "...", "0", "1", "0", "0", "0", "0", "0", "1", "0", "0", "]", "...", "[", "1", "0", "0", "0", "0", "0", "1", "0", "0", "0", "...", "0", "0", "0", "0", "1", "0", "0", "0", "0", "0", "]", "[", "0", "0", "0", "1", "0", "0", "0", "0", "0", "1", "...", "0", "1", "0", "0", "0", "0", "0", "1", "0", "0", "]]", "Output", ":", "[[", "0", "1", "0", "0", "0", "1", "0", "1", "0", "0", "...", "0", "0", "0", "1", "0", "1", "0", "0", "0", "1", "]", "[", "0", "0", "1", "0", "1", "0", "0", "0", "1", "0", "...", "1", "0", "1", "0", "0", "0", "1", "0", "1", "0", "]", "...", "[", "0", "1", "0", "0", "0", "1", "0", "1", "0", "0", "...", "0", "0", "0", "1", "0", "1", "0", "0", "0", "1", "]", "[", "0", "0", "1", "0", "1", "0", "0", "0", "1", "0", "...", "1", "0", "1", "0", "0", "0", "1", "0", "1", "0", "]]" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L800-L841
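A usage sketch for make_xtalk_mask(), assuming pyBAR is importable. For every enabled pixel the neighbouring rows (row - 1 and row + 1) end up in the crosstalk mask; the single injection pixel below is an arbitrary example:

from pybar.fei4.register_utils import make_pixel_mask_from_col_row, make_xtalk_mask

injection = make_pixel_mask_from_col_row(column=[5], row=[100])
xtalk = make_xtalk_mask(injection)
print(xtalk.sum())  # 2: rows 99 and 101 (1-based) of column 5 are selected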
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
make_checkerboard_mask
def make_checkerboard_mask(column_distance, row_distance, column_offset=0, row_offset=0, default=0, value=1): """ Generate chessboard/checkerboard mask. Parameters ---------- column_distance : int Column distance of the enabled pixels. row_distance : int Row distance of the enabled pixels. column_offset : int Additional column offset which shifts the columns by the given amount. column_offset : int Additional row offset which shifts the rows by the given amount. Returns ------- ndarray Chessboard mask. Example ------- Input: column_distance : 6 row_distance : 2 Output: [[1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] ... [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0]] """ col_shape = (336,) col = np.full(col_shape, fill_value=default, dtype=np.uint8) col[::row_distance] = value shape = (80, 336) chessboard_mask = np.full(shape, fill_value=default, dtype=np.uint8) chessboard_mask[column_offset::column_distance * 2] = np.roll(col, row_offset) chessboard_mask[column_distance + column_offset::column_distance * 2] = np.roll(col, row_distance / 2 + row_offset) return chessboard_mask
python
def make_checkerboard_mask(column_distance, row_distance, column_offset=0, row_offset=0, default=0, value=1): """ Generate chessboard/checkerboard mask. Parameters ---------- column_distance : int Column distance of the enabled pixels. row_distance : int Row distance of the enabled pixels. column_offset : int Additional column offset which shifts the columns by the given amount. column_offset : int Additional row offset which shifts the rows by the given amount. Returns ------- ndarray Chessboard mask. Example ------- Input: column_distance : 6 row_distance : 2 Output: [[1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] ... [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0]] """ col_shape = (336,) col = np.full(col_shape, fill_value=default, dtype=np.uint8) col[::row_distance] = value shape = (80, 336) chessboard_mask = np.full(shape, fill_value=default, dtype=np.uint8) chessboard_mask[column_offset::column_distance * 2] = np.roll(col, row_offset) chessboard_mask[column_distance + column_offset::column_distance * 2] = np.roll(col, row_distance / 2 + row_offset) return chessboard_mask
[ "def", "make_checkerboard_mask", "(", "column_distance", ",", "row_distance", ",", "column_offset", "=", "0", ",", "row_offset", "=", "0", ",", "default", "=", "0", ",", "value", "=", "1", ")", ":", "col_shape", "=", "(", "336", ",", ")", "col", "=", "np", ".", "full", "(", "col_shape", ",", "fill_value", "=", "default", ",", "dtype", "=", "np", ".", "uint8", ")", "col", "[", ":", ":", "row_distance", "]", "=", "value", "shape", "=", "(", "80", ",", "336", ")", "chessboard_mask", "=", "np", ".", "full", "(", "shape", ",", "fill_value", "=", "default", ",", "dtype", "=", "np", ".", "uint8", ")", "chessboard_mask", "[", "column_offset", ":", ":", "column_distance", "*", "2", "]", "=", "np", ".", "roll", "(", "col", ",", "row_offset", ")", "chessboard_mask", "[", "column_distance", "+", "column_offset", ":", ":", "column_distance", "*", "2", "]", "=", "np", ".", "roll", "(", "col", ",", "row_distance", "/", "2", "+", "row_offset", ")", "return", "chessboard_mask" ]
Generate chessboard/checkerboard mask. Parameters ---------- column_distance : int Column distance of the enabled pixels. row_distance : int Row distance of the enabled pixels. column_offset : int Additional column offset which shifts the columns by the given amount. column_offset : int Additional row offset which shifts the rows by the given amount. Returns ------- ndarray Chessboard mask. Example ------- Input: column_distance : 6 row_distance : 2 Output: [[1 0 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 0 0 0 0 0] [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] ... [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 1 ... 0 1 0 0 0 0 0 1 0 0] [0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0]]
[ "Generate", "chessboard", "/", "checkerboard", "mask", ".", "Parameters", "----------", "column_distance", ":", "int", "Column", "distance", "of", "the", "enabled", "pixels", ".", "row_distance", ":", "int", "Row", "distance", "of", "the", "enabled", "pixels", ".", "column_offset", ":", "int", "Additional", "column", "offset", "which", "shifts", "the", "columns", "by", "the", "given", "amount", ".", "column_offset", ":", "int", "Additional", "row", "offset", "which", "shifts", "the", "rows", "by", "the", "given", "amount", ".", "Returns", "-------", "ndarray", "Chessboard", "mask", ".", "Example", "-------", "Input", ":", "column_distance", ":", "6", "row_distance", ":", "2", "Output", ":", "[[", "1", "0", "0", "0", "0", "0", "1", "0", "0", "0", "...", "0", "0", "0", "0", "1", "0", "0", "0", "0", "0", "]", "[", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "...", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "]", "[", "0", "0", "0", "1", "0", "0", "0", "0", "0", "1", "...", "0", "1", "0", "0", "0", "0", "0", "1", "0", "0", "]", "...", "[", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "...", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "]", "[", "0", "0", "0", "1", "0", "0", "0", "0", "0", "1", "...", "0", "1", "0", "0", "0", "0", "0", "1", "0", "0", "]", "[", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "...", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "]]" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L844-L886
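The chessboard pattern documented above can also be reproduced with a few lines of plain numpy, which avoids the pyBAR install for a quick look. A self-contained sketch of the pattern for column_distance=6, row_distance=2 with no offsets:

import numpy as np

col = np.zeros(336, dtype=np.uint8)
col[::2] = 1                      # every second row enabled
cb = np.zeros((80, 336), dtype=np.uint8)
cb[0::12] = col                   # columns 0, 12, 24, ... (2 * column_distance apart)
cb[6::12] = np.roll(col, 1)       # columns 6, 18, 30, ... shifted by one row

print(cb[:13, 0])  # [1 0 0 0 0 0 0 0 0 0 0 0 1]
print(cb[:13, 1])  # [0 0 0 0 0 0 1 0 0 0 0 0 0]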
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
scan_loop
def scan_loop(self, command, repeat_command=100, use_delay=True, additional_delay=0, mask_steps=3, enable_mask_steps=None, enable_double_columns=None, same_mask_for_all_dc=False, fast_dc_loop=True, bol_function=None, eol_function=None, digital_injection=False, enable_shift_masks=None, disable_shift_masks=None, restore_shift_masks=True, mask=None, double_column_correction=False): '''Implementation of the scan loops (mask shifting, loop over double columns, repeatedly sending any arbitrary command). Parameters ---------- command : BitVector (FEI4) command that will be sent out serially. repeat_command : int The number of repetitions command will be sent out each mask step. use_delay : bool Add additional delay to the command (append zeros). This helps to avoid FE data errors because of sending to many commands to the FE chip. additional_delay: int Additional delay to increase the command-to-command delay (in number of clock cycles / 25ns). mask_steps : int Number of mask steps (from 1 to 672). enable_mask_steps : list, tuple List of mask steps which will be applied. Default is all mask steps. From 0 to (mask-1). A value equal None or empty list will select all mask steps. enable_double_columns : list, tuple List of double columns which will be enabled during scan. Default is all double columns. From 0 to 39 (double columns counted from zero). A value equal None or empty list will select all double columns. same_mask_for_all_dc : bool Use same mask for all double columns. This will only affect all shift masks (see enable_shift_masks). Enabling this is in general a good idea since all double columns will have the same configuration and the scan speed can increased by an order of magnitude. fast_dc_loop : bool If True, optimize double column (DC) loop to save time. Note that bol_function and eol_function cannot do register operations, if True. bol_function : function Begin of loop function that will be called each time before sending command. Argument is a function pointer (without braces) or functor. eol_function : function End of loop function that will be called each time after sending command. Argument is a function pointer (without braces) or functor. digital_injection : bool Enables digital injection. C_High and C_Low will be disabled. enable_shift_masks : list, tuple List of enable pixel masks which will be shifted during scan. Mask set to 1 for selected pixels else 0. None will select "Enable", "C_High", "C_Low". disable_shift_masks : list, tuple List of disable pixel masks which will be shifted during scan. Mask set to 0 for selected pixels else 1. None will disable no mask. restore_shift_masks : bool Writing the initial (restored) FE pixel configuration into FE after finishing the scan loop. mask : array-like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked pixel. Masked pixels will be disabled during shifting of the enable shift masks, and enabled during shifting disable shift mask. double_column_correction : str, bool, list, tuple Enables double column PlsrDAC correction. If value is a filename (string) or list/tuple, the default PlsrDAC correction will be overwritten. 
First line of the file must be a Python list ([0, 0, ...]) ''' if not isinstance(command, bitarray): raise TypeError if enable_shift_masks is None: enable_shift_masks = ["Enable", "C_High", "C_Low"] if disable_shift_masks is None: disable_shift_masks = [] # get PlsrDAC correction if isinstance(double_column_correction, basestring): # from file with open(double_column_correction) as fp: plsr_dac_correction = list(literal_eval(fp.readline().strip())) elif isinstance(double_column_correction, (list, tuple)): # from list/tuple plsr_dac_correction = list(double_column_correction) else: # default if "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks) and "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_High'] elif "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_Med'] elif "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_Low'] # initial PlsrDAC value for PlsrDAC correction initial_plsr_dac = self.register.get_global_register_value("PlsrDAC") # create restore point restore_point_name = str(self.run_number) + '_' + self.run_id + '_scan_loop' with self.register.restored(name=restore_point_name): # pre-calculate often used commands conf_mode_command = self.register.get_commands("ConfMode")[0] run_mode_command = self.register.get_commands("RunMode")[0] if use_delay: delay = self.register.get_commands("zeros", length=additional_delay + calculate_wait_cycles(mask_steps))[0] scan_loop_command = command + delay else: scan_loop_command = command def enable_columns(dc): if digital_injection: return [dc * 2 + 1, dc * 2 + 2] else: # analog injection if dc == 0: return [1] elif dc == 39: return [78, 79, 80] else: return [dc * 2, dc * 2 + 1] def write_double_columns(dc): if digital_injection: return [dc] else: # analog injection if dc == 0: return [0] elif dc == 39: return [38, 39] else: return [dc - 1, dc] def get_dc_address_command(dc): commands = [] commands.append(conf_mode_command) self.register.set_global_register_value("Colpr_Addr", dc) commands.append(self.register.get_commands("WrRegister", name=["Colpr_Addr"])[0]) if double_column_correction: self.register.set_global_register_value("PlsrDAC", initial_plsr_dac + int(round(plsr_dac_correction[dc]))) commands.append(self.register.get_commands("WrRegister", name=["PlsrDAC"])[0]) commands.append(run_mode_command) return self.register_utils.concatenate_commands(commands, byte_padding=True) if not enable_mask_steps: enable_mask_steps = range(mask_steps) if not enable_double_columns: enable_double_columns = range(40) # preparing for scan commands = [] commands.append(conf_mode_command) if digital_injection is True: # check if C_High and/or C_Low is in enable_shift_mask and/or disable_shift_mask if "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks) or "C_High".lower() in map(lambda x: x.lower(), disable_shift_masks): raise ValueError('C_High must not be shift mask when using digital injection') if "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks) or "C_Low".lower() in map(lambda x: x.lower(), disable_shift_masks): raise ValueError('C_Low must not be shift mask when using digital injection') # turn off all injection capacitors by default self.register.set_pixel_register_value("C_High", 0) self.register.set_pixel_register_value("C_Low", 0) 
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=["C_Low", "C_High"], joint_write=True)) self.register.set_global_register_value("DIGHITIN_SEL", 1) # self.register.set_global_register_value("CalEn", 1) # for GlobalPulse instead Cal-Command else: self.register.set_global_register_value("DIGHITIN_SEL", 0) # setting EnableDigInj to 0 not necessary since DIGHITIN_SEL is turned off # self.register.set_pixel_register_value("EnableDigInj", 0) # plotting registers # plt.clf() # plt.imshow(curr_en_mask.T, interpolation='nearest', aspect="auto") # plt.pcolor(curr_en_mask.T) # plt.colorbar() # plt.savefig('mask_step' + str(mask_step) + '.pdf') commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) for mask_step in enable_mask_steps: if self.abort_run.is_set(): break commands = [] commands.append(conf_mode_command) if same_mask_for_all_dc: # generate and write first mask step if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False if mask is not None else True, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False if mask is not None else True, name=enable_shift_masks, joint_write=True)) if digital_injection is True: # write EnableDigInj last # write DIGHITIN_SEL since after mask writing it is disabled self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) else: # set masks to default values if disable_shift_masks: map(lambda mask_name: self.register.set_pixel_register_value(mask_name, 1), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: map(lambda mask_name: self.register.set_pixel_register_value(mask_name, 0), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=enable_shift_masks, joint_write=True)) if digital_injection is True: # write EnableDigInj last # write DIGHITIN_SEL since after mask writing it is disabled self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) logging.info('%d injection(s): mask step %d %s', repeat_command, mask_step, ('[%d - %d]' % (enable_mask_steps[0], enable_mask_steps[-1])) if len(enable_mask_steps) > 1 else ('[%d]' % enable_mask_steps[0])) if same_mask_for_all_dc: if fast_dc_loop: # fast DC loop with optimized pixel register writing # set repeat, should be 1 by default when arriving here self.dut['TX']['CMD_REPEAT'] = repeat_command # get DC command for the first DC in the list, DC command is byte padded # fill CMD memory with DC command and scan loop command, inside the loop only overwrite DC command dc_address_command = get_dc_address_command(enable_double_columns[0]) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) 
self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break if index != 0: # full command is already set before loop # get DC command before wait to save some time dc_address_command = get_dc_address_command(dc) self.register_utils.wait_for_command() if eol_function: eol_function() # do this after command has finished # only set command after FPGA is ready # overwrite only the DC command in CMD memory self.register_utils.set_command(dc_address_command, set_length=False) # do not set length here, because it was already set up before the loop if bol_function: bol_function() self.dut['TX']['START'] # wait here before we go on because we just jumped out of the loop self.register_utils.wait_for_command() if eol_function: eol_function() self.dut['TX']['START_SEQUENCE_LENGTH'] = 0 else: # the slow DC loop allows writing commands inside bol and eol functions for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break dc_address_command = get_dc_address_command(dc) self.register_utils.send_command(dc_address_command) if bol_function: bol_function() self.register_utils.send_command(scan_loop_command, repeat=repeat_command) if eol_function: eol_function() else: if fast_dc_loop: # fast DC loop with optimized pixel register writing dc = enable_double_columns[0] ec = enable_columns(dc) dcs = write_double_columns(dc) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) dc_address_command = get_dc_address_command(dc) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) self.dut['TX']['CMD_REPEAT'] = repeat_command self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break if index != 0: # full command is already set before loop ec = enable_columns(dc) dcs = write_double_columns(dc) dcs.extend(write_double_columns(enable_double_columns[index - 1])) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if 
enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) dc_address_command = get_dc_address_command(dc) self.register_utils.wait_for_command() if eol_function: eol_function() # do this after command has finished self.register_utils.send_commands(commands) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) self.dut['TX']['CMD_REPEAT'] = repeat_command self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) if bol_function: bol_function() self.dut['TX']['START'] self.register_utils.wait_for_command() if eol_function: eol_function() self.dut['TX']['START_SEQUENCE_LENGTH'] = 0 else: for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break ec = enable_columns(dc) dcs = write_double_columns(dc) if index != 0: dcs.extend(write_double_columns(enable_double_columns[index - 1])) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) dc_address_command = get_dc_address_command(dc) self.register_utils.send_command(dc_address_command) if bol_function: bol_function() self.register_utils.send_command(scan_loop_command, repeat=repeat_command) if eol_function: eol_function() commands = [] commands.extend(self.register.get_commands("ConfMode")) # write registers that were changed in scan_loop() commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL", "Colpr_Addr", "PlsrDAC"])) if restore_shift_masks: commands = [] commands.extend(self.register.get_commands("ConfMode")) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL", "Colpr_Addr", "PlsrDAC"])) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name=disable_shift_masks)) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name=enable_shift_masks)) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="EnableDigInj")) # commands.extend(self.register.get_commands("RunMode")) self.register_utils.send_commands(commands)
python
def scan_loop(self, command, repeat_command=100, use_delay=True, additional_delay=0, mask_steps=3, enable_mask_steps=None, enable_double_columns=None, same_mask_for_all_dc=False, fast_dc_loop=True, bol_function=None, eol_function=None, digital_injection=False, enable_shift_masks=None, disable_shift_masks=None, restore_shift_masks=True, mask=None, double_column_correction=False): '''Implementation of the scan loops (mask shifting, loop over double columns, repeatedly sending any arbitrary command). Parameters ---------- command : BitVector (FEI4) command that will be sent out serially. repeat_command : int The number of repetitions command will be sent out each mask step. use_delay : bool Add additional delay to the command (append zeros). This helps to avoid FE data errors because of sending to many commands to the FE chip. additional_delay: int Additional delay to increase the command-to-command delay (in number of clock cycles / 25ns). mask_steps : int Number of mask steps (from 1 to 672). enable_mask_steps : list, tuple List of mask steps which will be applied. Default is all mask steps. From 0 to (mask-1). A value equal None or empty list will select all mask steps. enable_double_columns : list, tuple List of double columns which will be enabled during scan. Default is all double columns. From 0 to 39 (double columns counted from zero). A value equal None or empty list will select all double columns. same_mask_for_all_dc : bool Use same mask for all double columns. This will only affect all shift masks (see enable_shift_masks). Enabling this is in general a good idea since all double columns will have the same configuration and the scan speed can increased by an order of magnitude. fast_dc_loop : bool If True, optimize double column (DC) loop to save time. Note that bol_function and eol_function cannot do register operations, if True. bol_function : function Begin of loop function that will be called each time before sending command. Argument is a function pointer (without braces) or functor. eol_function : function End of loop function that will be called each time after sending command. Argument is a function pointer (without braces) or functor. digital_injection : bool Enables digital injection. C_High and C_Low will be disabled. enable_shift_masks : list, tuple List of enable pixel masks which will be shifted during scan. Mask set to 1 for selected pixels else 0. None will select "Enable", "C_High", "C_Low". disable_shift_masks : list, tuple List of disable pixel masks which will be shifted during scan. Mask set to 0 for selected pixels else 1. None will disable no mask. restore_shift_masks : bool Writing the initial (restored) FE pixel configuration into FE after finishing the scan loop. mask : array-like Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked pixel. Masked pixels will be disabled during shifting of the enable shift masks, and enabled during shifting disable shift mask. double_column_correction : str, bool, list, tuple Enables double column PlsrDAC correction. If value is a filename (string) or list/tuple, the default PlsrDAC correction will be overwritten. 
First line of the file must be a Python list ([0, 0, ...]) ''' if not isinstance(command, bitarray): raise TypeError if enable_shift_masks is None: enable_shift_masks = ["Enable", "C_High", "C_Low"] if disable_shift_masks is None: disable_shift_masks = [] # get PlsrDAC correction if isinstance(double_column_correction, basestring): # from file with open(double_column_correction) as fp: plsr_dac_correction = list(literal_eval(fp.readline().strip())) elif isinstance(double_column_correction, (list, tuple)): # from list/tuple plsr_dac_correction = list(double_column_correction) else: # default if "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks) and "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_High'] elif "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_Med'] elif "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks): plsr_dac_correction = self.register.calibration_parameters['Pulser_Corr_C_Inj_Low'] # initial PlsrDAC value for PlsrDAC correction initial_plsr_dac = self.register.get_global_register_value("PlsrDAC") # create restore point restore_point_name = str(self.run_number) + '_' + self.run_id + '_scan_loop' with self.register.restored(name=restore_point_name): # pre-calculate often used commands conf_mode_command = self.register.get_commands("ConfMode")[0] run_mode_command = self.register.get_commands("RunMode")[0] if use_delay: delay = self.register.get_commands("zeros", length=additional_delay + calculate_wait_cycles(mask_steps))[0] scan_loop_command = command + delay else: scan_loop_command = command def enable_columns(dc): if digital_injection: return [dc * 2 + 1, dc * 2 + 2] else: # analog injection if dc == 0: return [1] elif dc == 39: return [78, 79, 80] else: return [dc * 2, dc * 2 + 1] def write_double_columns(dc): if digital_injection: return [dc] else: # analog injection if dc == 0: return [0] elif dc == 39: return [38, 39] else: return [dc - 1, dc] def get_dc_address_command(dc): commands = [] commands.append(conf_mode_command) self.register.set_global_register_value("Colpr_Addr", dc) commands.append(self.register.get_commands("WrRegister", name=["Colpr_Addr"])[0]) if double_column_correction: self.register.set_global_register_value("PlsrDAC", initial_plsr_dac + int(round(plsr_dac_correction[dc]))) commands.append(self.register.get_commands("WrRegister", name=["PlsrDAC"])[0]) commands.append(run_mode_command) return self.register_utils.concatenate_commands(commands, byte_padding=True) if not enable_mask_steps: enable_mask_steps = range(mask_steps) if not enable_double_columns: enable_double_columns = range(40) # preparing for scan commands = [] commands.append(conf_mode_command) if digital_injection is True: # check if C_High and/or C_Low is in enable_shift_mask and/or disable_shift_mask if "C_High".lower() in map(lambda x: x.lower(), enable_shift_masks) or "C_High".lower() in map(lambda x: x.lower(), disable_shift_masks): raise ValueError('C_High must not be shift mask when using digital injection') if "C_Low".lower() in map(lambda x: x.lower(), enable_shift_masks) or "C_Low".lower() in map(lambda x: x.lower(), disable_shift_masks): raise ValueError('C_Low must not be shift mask when using digital injection') # turn off all injection capacitors by default self.register.set_pixel_register_value("C_High", 0) self.register.set_pixel_register_value("C_Low", 0) 
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=["C_Low", "C_High"], joint_write=True)) self.register.set_global_register_value("DIGHITIN_SEL", 1) # self.register.set_global_register_value("CalEn", 1) # for GlobalPulse instead Cal-Command else: self.register.set_global_register_value("DIGHITIN_SEL", 0) # setting EnableDigInj to 0 not necessary since DIGHITIN_SEL is turned off # self.register.set_pixel_register_value("EnableDigInj", 0) # plotting registers # plt.clf() # plt.imshow(curr_en_mask.T, interpolation='nearest', aspect="auto") # plt.pcolor(curr_en_mask.T) # plt.colorbar() # plt.savefig('mask_step' + str(mask_step) + '.pdf') commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) for mask_step in enable_mask_steps: if self.abort_run.is_set(): break commands = [] commands.append(conf_mode_command) if same_mask_for_all_dc: # generate and write first mask step if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False if mask is not None else True, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False if mask is not None else True, name=enable_shift_masks, joint_write=True)) if digital_injection is True: # write EnableDigInj last # write DIGHITIN_SEL since after mask writing it is disabled self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) else: # set masks to default values if disable_shift_masks: map(lambda mask_name: self.register.set_pixel_register_value(mask_name, 1), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: map(lambda mask_name: self.register.set_pixel_register_value(mask_name, 0), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name=enable_shift_masks, joint_write=True)) if digital_injection is True: # write EnableDigInj last # write DIGHITIN_SEL since after mask writing it is disabled self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) logging.info('%d injection(s): mask step %d %s', repeat_command, mask_step, ('[%d - %d]' % (enable_mask_steps[0], enable_mask_steps[-1])) if len(enable_mask_steps) > 1 else ('[%d]' % enable_mask_steps[0])) if same_mask_for_all_dc: if fast_dc_loop: # fast DC loop with optimized pixel register writing # set repeat, should be 1 by default when arriving here self.dut['TX']['CMD_REPEAT'] = repeat_command # get DC command for the first DC in the list, DC command is byte padded # fill CMD memory with DC command and scan loop command, inside the loop only overwrite DC command dc_address_command = get_dc_address_command(enable_double_columns[0]) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) 
self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break if index != 0: # full command is already set before loop # get DC command before wait to save some time dc_address_command = get_dc_address_command(dc) self.register_utils.wait_for_command() if eol_function: eol_function() # do this after command has finished # only set command after FPGA is ready # overwrite only the DC command in CMD memory self.register_utils.set_command(dc_address_command, set_length=False) # do not set length here, because it was already set up before the loop if bol_function: bol_function() self.dut['TX']['START'] # wait here before we go on because we just jumped out of the loop self.register_utils.wait_for_command() if eol_function: eol_function() self.dut['TX']['START_SEQUENCE_LENGTH'] = 0 else: # the slow DC loop allows writing commands inside bol and eol functions for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break dc_address_command = get_dc_address_command(dc) self.register_utils.send_command(dc_address_command) if bol_function: bol_function() self.register_utils.send_command(scan_loop_command, repeat=repeat_command) if eol_function: eol_function() else: if fast_dc_loop: # fast DC loop with optimized pixel register writing dc = enable_double_columns[0] ec = enable_columns(dc) dcs = write_double_columns(dc) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) dc_address_command = get_dc_address_command(dc) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) self.dut['TX']['CMD_REPEAT'] = repeat_command self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break if index != 0: # full command is already set before loop ec = enable_columns(dc) dcs = write_double_columns(dc) dcs.extend(write_double_columns(enable_double_columns[index - 1])) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if 
enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) dc_address_command = get_dc_address_command(dc) self.register_utils.wait_for_command() if eol_function: eol_function() # do this after command has finished self.register_utils.send_commands(commands) self.dut['TX']['START_SEQUENCE_LENGTH'] = len(dc_address_command) self.dut['TX']['CMD_REPEAT'] = repeat_command self.register_utils.set_command(command=self.register_utils.concatenate_commands((dc_address_command, scan_loop_command), byte_padding=False)) if bol_function: bol_function() self.dut['TX']['START'] self.register_utils.wait_for_command() if eol_function: eol_function() self.dut['TX']['START_SEQUENCE_LENGTH'] = 0 else: for index, dc in enumerate(enable_double_columns): if self.abort_run.is_set(): break ec = enable_columns(dc) dcs = write_double_columns(dc) if index != 0: dcs.extend(write_double_columns(enable_double_columns[index - 1])) commands = [] commands.append(conf_mode_command) if disable_shift_masks: curr_dis_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, default=1, value=0, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_dis_mask), disable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=disable_shift_masks, joint_write=True)) if enable_shift_masks: curr_en_mask = make_pixel_mask(steps=mask_steps, shift=mask_step, enable_columns=ec, mask=mask) map(lambda mask_name: self.register.set_pixel_register_value(mask_name, curr_en_mask), enable_shift_masks) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=enable_shift_masks, joint_write=True)) if digital_injection is True: self.register.set_global_register_value("DIGHITIN_SEL", 1) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL"])) self.register_utils.send_commands(commands) dc_address_command = get_dc_address_command(dc) self.register_utils.send_command(dc_address_command) if bol_function: bol_function() self.register_utils.send_command(scan_loop_command, repeat=repeat_command) if eol_function: eol_function() commands = [] commands.extend(self.register.get_commands("ConfMode")) # write registers that were changed in scan_loop() commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL", "Colpr_Addr", "PlsrDAC"])) if restore_shift_masks: commands = [] commands.extend(self.register.get_commands("ConfMode")) commands.extend(self.register.get_commands("WrRegister", name=["DIGHITIN_SEL", "Colpr_Addr", "PlsrDAC"])) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name=disable_shift_masks)) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name=enable_shift_masks)) commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name="EnableDigInj")) # commands.extend(self.register.get_commands("RunMode")) self.register_utils.send_commands(commands)
[ "def", "scan_loop", "(", "self", ",", "command", ",", "repeat_command", "=", "100", ",", "use_delay", "=", "True", ",", "additional_delay", "=", "0", ",", "mask_steps", "=", "3", ",", "enable_mask_steps", "=", "None", ",", "enable_double_columns", "=", "None", ",", "same_mask_for_all_dc", "=", "False", ",", "fast_dc_loop", "=", "True", ",", "bol_function", "=", "None", ",", "eol_function", "=", "None", ",", "digital_injection", "=", "False", ",", "enable_shift_masks", "=", "None", ",", "disable_shift_masks", "=", "None", ",", "restore_shift_masks", "=", "True", ",", "mask", "=", "None", ",", "double_column_correction", "=", "False", ")", ":", "if", "not", "isinstance", "(", "command", ",", "bitarray", ")", ":", "raise", "TypeError", "if", "enable_shift_masks", "is", "None", ":", "enable_shift_masks", "=", "[", "\"Enable\"", ",", "\"C_High\"", ",", "\"C_Low\"", "]", "if", "disable_shift_masks", "is", "None", ":", "disable_shift_masks", "=", "[", "]", "# get PlsrDAC correction\r", "if", "isinstance", "(", "double_column_correction", ",", "basestring", ")", ":", "# from file\r", "with", "open", "(", "double_column_correction", ")", "as", "fp", ":", "plsr_dac_correction", "=", "list", "(", "literal_eval", "(", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", ")", ")", "elif", "isinstance", "(", "double_column_correction", ",", "(", "list", ",", "tuple", ")", ")", ":", "# from list/tuple\r", "plsr_dac_correction", "=", "list", "(", "double_column_correction", ")", "else", ":", "# default\r", "if", "\"C_High\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "enable_shift_masks", ")", "and", "\"C_Low\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "enable_shift_masks", ")", ":", "plsr_dac_correction", "=", "self", ".", "register", ".", "calibration_parameters", "[", "'Pulser_Corr_C_Inj_High'", "]", "elif", "\"C_High\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "enable_shift_masks", ")", ":", "plsr_dac_correction", "=", "self", ".", "register", ".", "calibration_parameters", "[", "'Pulser_Corr_C_Inj_Med'", "]", "elif", "\"C_Low\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "enable_shift_masks", ")", ":", "plsr_dac_correction", "=", "self", ".", "register", ".", "calibration_parameters", "[", "'Pulser_Corr_C_Inj_Low'", "]", "# initial PlsrDAC value for PlsrDAC correction\r", "initial_plsr_dac", "=", "self", ".", "register", ".", "get_global_register_value", "(", "\"PlsrDAC\"", ")", "# create restore point\r", "restore_point_name", "=", "str", "(", "self", ".", "run_number", ")", "+", "'_'", "+", "self", ".", "run_id", "+", "'_scan_loop'", "with", "self", ".", "register", ".", "restored", "(", "name", "=", "restore_point_name", ")", ":", "# pre-calculate often used commands\r", "conf_mode_command", "=", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", "[", "0", "]", "run_mode_command", "=", "self", ".", "register", ".", "get_commands", "(", "\"RunMode\"", ")", "[", "0", "]", "if", "use_delay", ":", "delay", "=", "self", ".", "register", ".", "get_commands", "(", "\"zeros\"", ",", "length", "=", "additional_delay", "+", "calculate_wait_cycles", "(", "mask_steps", ")", ")", "[", "0", "]", "scan_loop_command", "=", "command", "+", "delay", "else", ":", "scan_loop_command", "=", "command", "def", "enable_columns", "(", "dc", ")", ":", "if", "digital_injection", 
":", "return", "[", "dc", "*", "2", "+", "1", ",", "dc", "*", "2", "+", "2", "]", "else", ":", "# analog injection\r", "if", "dc", "==", "0", ":", "return", "[", "1", "]", "elif", "dc", "==", "39", ":", "return", "[", "78", ",", "79", ",", "80", "]", "else", ":", "return", "[", "dc", "*", "2", ",", "dc", "*", "2", "+", "1", "]", "def", "write_double_columns", "(", "dc", ")", ":", "if", "digital_injection", ":", "return", "[", "dc", "]", "else", ":", "# analog injection\r", "if", "dc", "==", "0", ":", "return", "[", "0", "]", "elif", "dc", "==", "39", ":", "return", "[", "38", ",", "39", "]", "else", ":", "return", "[", "dc", "-", "1", ",", "dc", "]", "def", "get_dc_address_command", "(", "dc", ")", ":", "commands", "=", "[", "]", "commands", ".", "append", "(", "conf_mode_command", ")", "self", ".", "register", ".", "set_global_register_value", "(", "\"Colpr_Addr\"", ",", "dc", ")", "commands", ".", "append", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"Colpr_Addr\"", "]", ")", "[", "0", "]", ")", "if", "double_column_correction", ":", "self", ".", "register", ".", "set_global_register_value", "(", "\"PlsrDAC\"", ",", "initial_plsr_dac", "+", "int", "(", "round", "(", "plsr_dac_correction", "[", "dc", "]", ")", ")", ")", "commands", ".", "append", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"PlsrDAC\"", "]", ")", "[", "0", "]", ")", "commands", ".", "append", "(", "run_mode_command", ")", "return", "self", ".", "register_utils", ".", "concatenate_commands", "(", "commands", ",", "byte_padding", "=", "True", ")", "if", "not", "enable_mask_steps", ":", "enable_mask_steps", "=", "range", "(", "mask_steps", ")", "if", "not", "enable_double_columns", ":", "enable_double_columns", "=", "range", "(", "40", ")", "# preparing for scan\r", "commands", "=", "[", "]", "commands", ".", "append", "(", "conf_mode_command", ")", "if", "digital_injection", "is", "True", ":", "# check if C_High and/or C_Low is in enable_shift_mask and/or disable_shift_mask\r", "if", "\"C_High\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "enable_shift_masks", ")", "or", "\"C_High\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "disable_shift_masks", ")", ":", "raise", "ValueError", "(", "'C_High must not be shift mask when using digital injection'", ")", "if", "\"C_Low\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "enable_shift_masks", ")", "or", "\"C_Low\"", ".", "lower", "(", ")", "in", "map", "(", "lambda", "x", ":", "x", ".", "lower", "(", ")", ",", "disable_shift_masks", ")", ":", "raise", "ValueError", "(", "'C_Low must not be shift mask when using digital injection'", ")", "# turn off all injection capacitors by default\r", "self", ".", "register", ".", "set_pixel_register_value", "(", "\"C_High\"", ",", "0", ")", "self", ".", "register", ".", "set_pixel_register_value", "(", "\"C_Low\"", ",", "0", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "True", ",", "name", "=", "[", "\"C_Low\"", ",", "\"C_High\"", "]", ",", "joint_write", "=", "True", ")", ")", "self", ".", "register", ".", "set_global_register_value", "(", "\"DIGHITIN_SEL\"", ",", "1", ")", "# self.register.set_global_register_value(\"CalEn\", 1) # for GlobalPulse instead Cal-Command\r", "else", ":", 
"self", ".", "register", ".", "set_global_register_value", "(", "\"DIGHITIN_SEL\"", ",", "0", ")", "# setting EnableDigInj to 0 not necessary since DIGHITIN_SEL is turned off\r", "# self.register.set_pixel_register_value(\"EnableDigInj\", 0)\r", "# plotting registers\r", "# plt.clf()\r", "# plt.imshow(curr_en_mask.T, interpolation='nearest', aspect=\"auto\")\r", "# plt.pcolor(curr_en_mask.T)\r", "# plt.colorbar()\r", "# plt.savefig('mask_step' + str(mask_step) + '.pdf')\r", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", "]", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "for", "mask_step", "in", "enable_mask_steps", ":", "if", "self", ".", "abort_run", ".", "is_set", "(", ")", ":", "break", "commands", "=", "[", "]", "commands", ".", "append", "(", "conf_mode_command", ")", "if", "same_mask_for_all_dc", ":", "# generate and write first mask step\r", "if", "disable_shift_masks", ":", "curr_dis_mask", "=", "make_pixel_mask", "(", "steps", "=", "mask_steps", ",", "shift", "=", "mask_step", ",", "default", "=", "1", ",", "value", "=", "0", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_dis_mask", ")", ",", "disable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", "if", "mask", "is", "not", "None", "else", "True", ",", "name", "=", "disable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "enable_shift_masks", ":", "curr_en_mask", "=", "make_pixel_mask", "(", "steps", "=", "mask_steps", ",", "shift", "=", "mask_step", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_en_mask", ")", ",", "enable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", "if", "mask", "is", "not", "None", "else", "True", ",", "name", "=", "enable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "digital_injection", "is", "True", ":", "# write EnableDigInj last\r", "# write DIGHITIN_SEL since after mask writing it is disabled\r", "self", ".", "register", ".", "set_global_register_value", "(", "\"DIGHITIN_SEL\"", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", "]", ")", ")", "else", ":", "# set masks to default values\r", "if", "disable_shift_masks", ":", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "1", ")", ",", "disable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "True", ",", "name", "=", "disable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "enable_shift_masks", ":", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "0", ")", ",", "enable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "True", ",", "name", "=", "enable_shift_masks", ",", 
"joint_write", "=", "True", ")", ")", "if", "digital_injection", "is", "True", ":", "# write EnableDigInj last\r", "# write DIGHITIN_SEL since after mask writing it is disabled\r", "self", ".", "register", ".", "set_global_register_value", "(", "\"DIGHITIN_SEL\"", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", "]", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "logging", ".", "info", "(", "'%d injection(s): mask step %d %s'", ",", "repeat_command", ",", "mask_step", ",", "(", "'[%d - %d]'", "%", "(", "enable_mask_steps", "[", "0", "]", ",", "enable_mask_steps", "[", "-", "1", "]", ")", ")", "if", "len", "(", "enable_mask_steps", ")", ">", "1", "else", "(", "'[%d]'", "%", "enable_mask_steps", "[", "0", "]", ")", ")", "if", "same_mask_for_all_dc", ":", "if", "fast_dc_loop", ":", "# fast DC loop with optimized pixel register writing\r", "# set repeat, should be 1 by default when arriving here\r", "self", ".", "dut", "[", "'TX'", "]", "[", "'CMD_REPEAT'", "]", "=", "repeat_command", "# get DC command for the first DC in the list, DC command is byte padded\r", "# fill CMD memory with DC command and scan loop command, inside the loop only overwrite DC command\r", "dc_address_command", "=", "get_dc_address_command", "(", "enable_double_columns", "[", "0", "]", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'START_SEQUENCE_LENGTH'", "]", "=", "len", "(", "dc_address_command", ")", "self", ".", "register_utils", ".", "set_command", "(", "command", "=", "self", ".", "register_utils", ".", "concatenate_commands", "(", "(", "dc_address_command", ",", "scan_loop_command", ")", ",", "byte_padding", "=", "False", ")", ")", "for", "index", ",", "dc", "in", "enumerate", "(", "enable_double_columns", ")", ":", "if", "self", ".", "abort_run", ".", "is_set", "(", ")", ":", "break", "if", "index", "!=", "0", ":", "# full command is already set before loop\r", "# get DC command before wait to save some time\r", "dc_address_command", "=", "get_dc_address_command", "(", "dc", ")", "self", ".", "register_utils", ".", "wait_for_command", "(", ")", "if", "eol_function", ":", "eol_function", "(", ")", "# do this after command has finished\r", "# only set command after FPGA is ready\r", "# overwrite only the DC command in CMD memory\r", "self", ".", "register_utils", ".", "set_command", "(", "dc_address_command", ",", "set_length", "=", "False", ")", "# do not set length here, because it was already set up before the loop\r", "if", "bol_function", ":", "bol_function", "(", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'START'", "]", "# wait here before we go on because we just jumped out of the loop\r", "self", ".", "register_utils", ".", "wait_for_command", "(", ")", "if", "eol_function", ":", "eol_function", "(", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'START_SEQUENCE_LENGTH'", "]", "=", "0", "else", ":", "# the slow DC loop allows writing commands inside bol and eol functions\r", "for", "index", ",", "dc", "in", "enumerate", "(", "enable_double_columns", ")", ":", "if", "self", ".", "abort_run", ".", "is_set", "(", ")", ":", "break", "dc_address_command", "=", "get_dc_address_command", "(", "dc", ")", "self", ".", "register_utils", ".", "send_command", "(", "dc_address_command", ")", "if", "bol_function", ":", "bol_function", "(", ")", "self", ".", "register_utils", ".", "send_command", "(", "scan_loop_command", ",", "repeat", "=", "repeat_command", 
")", "if", "eol_function", ":", "eol_function", "(", ")", "else", ":", "if", "fast_dc_loop", ":", "# fast DC loop with optimized pixel register writing\r", "dc", "=", "enable_double_columns", "[", "0", "]", "ec", "=", "enable_columns", "(", "dc", ")", "dcs", "=", "write_double_columns", "(", "dc", ")", "commands", "=", "[", "]", "commands", ".", "append", "(", "conf_mode_command", ")", "if", "disable_shift_masks", ":", "curr_dis_mask", "=", "make_pixel_mask", "(", "steps", "=", "mask_steps", ",", "shift", "=", "mask_step", ",", "default", "=", "1", ",", "value", "=", "0", ",", "enable_columns", "=", "ec", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_dis_mask", ")", ",", "disable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "dcs", "=", "dcs", ",", "name", "=", "disable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "enable_shift_masks", ":", "curr_en_mask", "=", "make_pixel_mask", "(", "steps", "=", "mask_steps", ",", "shift", "=", "mask_step", ",", "enable_columns", "=", "ec", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_en_mask", ")", ",", "enable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "dcs", "=", "dcs", ",", "name", "=", "enable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "digital_injection", "is", "True", ":", "self", ".", "register", ".", "set_global_register_value", "(", "\"DIGHITIN_SEL\"", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", "]", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "dc_address_command", "=", "get_dc_address_command", "(", "dc", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'START_SEQUENCE_LENGTH'", "]", "=", "len", "(", "dc_address_command", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'CMD_REPEAT'", "]", "=", "repeat_command", "self", ".", "register_utils", ".", "set_command", "(", "command", "=", "self", ".", "register_utils", ".", "concatenate_commands", "(", "(", "dc_address_command", ",", "scan_loop_command", ")", ",", "byte_padding", "=", "False", ")", ")", "for", "index", ",", "dc", "in", "enumerate", "(", "enable_double_columns", ")", ":", "if", "self", ".", "abort_run", ".", "is_set", "(", ")", ":", "break", "if", "index", "!=", "0", ":", "# full command is already set before loop\r", "ec", "=", "enable_columns", "(", "dc", ")", "dcs", "=", "write_double_columns", "(", "dc", ")", "dcs", ".", "extend", "(", "write_double_columns", "(", "enable_double_columns", "[", "index", "-", "1", "]", ")", ")", "commands", "=", "[", "]", "commands", ".", "append", "(", "conf_mode_command", ")", "if", "disable_shift_masks", ":", "curr_dis_mask", "=", "make_pixel_mask", "(", "steps", "=", "mask_steps", ",", "shift", "=", "mask_step", ",", "default", "=", "1", ",", "value", "=", "0", ",", "enable_columns", "=", "ec", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_dis_mask", ")", ",", 
"disable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "dcs", "=", "dcs", ",", "name", "=", "disable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "enable_shift_masks", ":", "curr_en_mask", "=", "make_pixel_mask", "(", "steps", "=", "mask_steps", ",", "shift", "=", "mask_step", ",", "enable_columns", "=", "ec", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_en_mask", ")", ",", "enable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "dcs", "=", "dcs", ",", "name", "=", "enable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "digital_injection", "is", "True", ":", "self", ".", "register", ".", "set_global_register_value", "(", "\"DIGHITIN_SEL\"", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", "]", ")", ")", "dc_address_command", "=", "get_dc_address_command", "(", "dc", ")", "self", ".", "register_utils", ".", "wait_for_command", "(", ")", "if", "eol_function", ":", "eol_function", "(", ")", "# do this after command has finished\r", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'START_SEQUENCE_LENGTH'", "]", "=", "len", "(", "dc_address_command", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'CMD_REPEAT'", "]", "=", "repeat_command", "self", ".", "register_utils", ".", "set_command", "(", "command", "=", "self", ".", "register_utils", ".", "concatenate_commands", "(", "(", "dc_address_command", ",", "scan_loop_command", ")", ",", "byte_padding", "=", "False", ")", ")", "if", "bol_function", ":", "bol_function", "(", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'START'", "]", "self", ".", "register_utils", ".", "wait_for_command", "(", ")", "if", "eol_function", ":", "eol_function", "(", ")", "self", ".", "dut", "[", "'TX'", "]", "[", "'START_SEQUENCE_LENGTH'", "]", "=", "0", "else", ":", "for", "index", ",", "dc", "in", "enumerate", "(", "enable_double_columns", ")", ":", "if", "self", ".", "abort_run", ".", "is_set", "(", ")", ":", "break", "ec", "=", "enable_columns", "(", "dc", ")", "dcs", "=", "write_double_columns", "(", "dc", ")", "if", "index", "!=", "0", ":", "dcs", ".", "extend", "(", "write_double_columns", "(", "enable_double_columns", "[", "index", "-", "1", "]", ")", ")", "commands", "=", "[", "]", "commands", ".", "append", "(", "conf_mode_command", ")", "if", "disable_shift_masks", ":", "curr_dis_mask", "=", "make_pixel_mask", "(", "steps", "=", "mask_steps", ",", "shift", "=", "mask_step", ",", "default", "=", "1", ",", "value", "=", "0", ",", "enable_columns", "=", "ec", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_dis_mask", ")", ",", "disable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "dcs", "=", "dcs", ",", "name", "=", "disable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "enable_shift_masks", ":", "curr_en_mask", "=", "make_pixel_mask", "(", "steps", 
"=", "mask_steps", ",", "shift", "=", "mask_step", ",", "enable_columns", "=", "ec", ",", "mask", "=", "mask", ")", "map", "(", "lambda", "mask_name", ":", "self", ".", "register", ".", "set_pixel_register_value", "(", "mask_name", ",", "curr_en_mask", ")", ",", "enable_shift_masks", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "dcs", "=", "dcs", ",", "name", "=", "enable_shift_masks", ",", "joint_write", "=", "True", ")", ")", "if", "digital_injection", "is", "True", ":", "self", ".", "register", ".", "set_global_register_value", "(", "\"DIGHITIN_SEL\"", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", "]", ")", ")", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")", "dc_address_command", "=", "get_dc_address_command", "(", "dc", ")", "self", ".", "register_utils", ".", "send_command", "(", "dc_address_command", ")", "if", "bol_function", ":", "bol_function", "(", ")", "self", ".", "register_utils", ".", "send_command", "(", "scan_loop_command", ",", "repeat", "=", "repeat_command", ")", "if", "eol_function", ":", "eol_function", "(", ")", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "# write registers that were changed in scan_loop()\r", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", ",", "\"Colpr_Addr\"", ",", "\"PlsrDAC\"", "]", ")", ")", "if", "restore_shift_masks", ":", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "\"DIGHITIN_SEL\"", ",", "\"Colpr_Addr\"", ",", "\"PlsrDAC\"", "]", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "name", "=", "disable_shift_masks", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "name", "=", "enable_shift_masks", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrFrontEnd\"", ",", "same_mask_for_all_dc", "=", "False", ",", "name", "=", "\"EnableDigInj\"", ")", ")", "# commands.extend(self.register.get_commands(\"RunMode\"))\r", "self", ".", "register_utils", ".", "send_commands", "(", "commands", ")" ]
Implementation of the scan loops (mask shifting, loop over double columns, repeatedly sending any arbitrary command).

        Parameters
        ----------
        command : BitVector
            (FEI4) command that will be sent out serially.
        repeat_command : int
            The number of repetitions command will be sent out each mask step.
        use_delay : bool
            Add additional delay to the command (append zeros). This helps to avoid FE data errors because of sending too many commands to the FE chip.
        additional_delay : int
            Additional delay to increase the command-to-command delay (in number of clock cycles / 25ns).
        mask_steps : int
            Number of mask steps (from 1 to 672).
        enable_mask_steps : list, tuple
            List of mask steps which will be applied. Default is all mask steps. From 0 to (mask-1). A value equal to None or an empty list will select all mask steps.
        enable_double_columns : list, tuple
            List of double columns which will be enabled during scan. Default is all double columns. From 0 to 39 (double columns counted from zero). A value equal to None or an empty list will select all double columns.
        same_mask_for_all_dc : bool
            Use same mask for all double columns. This will only affect all shift masks (see enable_shift_masks). Enabling this is in general a good idea since all double columns will have the same configuration and the scan speed can be increased by an order of magnitude.
        fast_dc_loop : bool
            If True, optimize double column (DC) loop to save time. Note that bol_function and eol_function cannot do register operations if True.
        bol_function : function
            Begin of loop function that will be called each time before sending command. Argument is a function pointer (without braces) or functor.
        eol_function : function
            End of loop function that will be called each time after sending command. Argument is a function pointer (without braces) or functor.
        digital_injection : bool
            Enables digital injection. C_High and C_Low will be disabled.
        enable_shift_masks : list, tuple
            List of enable pixel masks which will be shifted during scan. Mask set to 1 for selected pixels else 0. None will select "Enable", "C_High", "C_Low".
        disable_shift_masks : list, tuple
            List of disable pixel masks which will be shifted during scan. Mask set to 0 for selected pixels else 1. None will disable no mask.
        restore_shift_masks : bool
            Write the initial (restored) FE pixel configuration into the FE after finishing the scan loop.
        mask : array-like
            Additional mask. Must be convertible to an array of booleans with the same shape as mask array. True indicates a masked pixel. Masked pixels will be disabled during shifting of the enable shift masks, and enabled during shifting disable shift mask.
        double_column_correction : str, bool, list, tuple
            Enables double column PlsrDAC correction. If value is a filename (string) or list/tuple, the default PlsrDAC correction will be overwritten. First line of the file must be a Python list ([0, 0, ...])
[ "Implementation", "of", "the", "scan", "loops", "(", "mask", "shifting", "loop", "over", "double", "columns", "repeatedly", "sending", "any", "arbitrary", "command", ")", ".", "Parameters", "----------", "command", ":", "BitVector", "(", "FEI4", ")", "command", "that", "will", "be", "sent", "out", "serially", ".", "repeat_command", ":", "int", "The", "number", "of", "repetitions", "command", "will", "be", "sent", "out", "each", "mask", "step", ".", "use_delay", ":", "bool", "Add", "additional", "delay", "to", "the", "command", "(", "append", "zeros", ")", ".", "This", "helps", "to", "avoid", "FE", "data", "errors", "because", "of", "sending", "to", "many", "commands", "to", "the", "FE", "chip", ".", "additional_delay", ":", "int", "Additional", "delay", "to", "increase", "the", "command", "-", "to", "-", "command", "delay", "(", "in", "number", "of", "clock", "cycles", "/", "25ns", ")", ".", "mask_steps", ":", "int", "Number", "of", "mask", "steps", "(", "from", "1", "to", "672", ")", ".", "enable_mask_steps", ":", "list", "tuple", "List", "of", "mask", "steps", "which", "will", "be", "applied", ".", "Default", "is", "all", "mask", "steps", ".", "From", "0", "to", "(", "mask", "-", "1", ")", ".", "A", "value", "equal", "None", "or", "empty", "list", "will", "select", "all", "mask", "steps", ".", "enable_double_columns", ":", "list", "tuple", "List", "of", "double", "columns", "which", "will", "be", "enabled", "during", "scan", ".", "Default", "is", "all", "double", "columns", ".", "From", "0", "to", "39", "(", "double", "columns", "counted", "from", "zero", ")", ".", "A", "value", "equal", "None", "or", "empty", "list", "will", "select", "all", "double", "columns", ".", "same_mask_for_all_dc", ":", "bool", "Use", "same", "mask", "for", "all", "double", "columns", ".", "This", "will", "only", "affect", "all", "shift", "masks", "(", "see", "enable_shift_masks", ")", ".", "Enabling", "this", "is", "in", "general", "a", "good", "idea", "since", "all", "double", "columns", "will", "have", "the", "same", "configuration", "and", "the", "scan", "speed", "can", "increased", "by", "an", "order", "of", "magnitude", ".", "fast_dc_loop", ":", "bool", "If", "True", "optimize", "double", "column", "(", "DC", ")", "loop", "to", "save", "time", ".", "Note", "that", "bol_function", "and", "eol_function", "cannot", "do", "register", "operations", "if", "True", ".", "bol_function", ":", "function", "Begin", "of", "loop", "function", "that", "will", "be", "called", "each", "time", "before", "sending", "command", ".", "Argument", "is", "a", "function", "pointer", "(", "without", "braces", ")", "or", "functor", ".", "eol_function", ":", "function", "End", "of", "loop", "function", "that", "will", "be", "called", "each", "time", "after", "sending", "command", ".", "Argument", "is", "a", "function", "pointer", "(", "without", "braces", ")", "or", "functor", ".", "digital_injection", ":", "bool", "Enables", "digital", "injection", ".", "C_High", "and", "C_Low", "will", "be", "disabled", ".", "enable_shift_masks", ":", "list", "tuple", "List", "of", "enable", "pixel", "masks", "which", "will", "be", "shifted", "during", "scan", ".", "Mask", "set", "to", "1", "for", "selected", "pixels", "else", "0", ".", "None", "will", "select", "Enable", "C_High", "C_Low", ".", "disable_shift_masks", ":", "list", "tuple", "List", "of", "disable", "pixel", "masks", "which", "will", "be", "shifted", "during", "scan", ".", "Mask", "set", "to", "0", "for", "selected", "pixels", "else", "1", ".", "None", "will", "disable", "no", 
"mask", ".", "restore_shift_masks", ":", "bool", "Writing", "the", "initial", "(", "restored", ")", "FE", "pixel", "configuration", "into", "FE", "after", "finishing", "the", "scan", "loop", ".", "mask", ":", "array", "-", "like", "Additional", "mask", ".", "Must", "be", "convertible", "to", "an", "array", "of", "booleans", "with", "the", "same", "shape", "as", "mask", "array", ".", "True", "indicates", "a", "masked", "pixel", ".", "Masked", "pixels", "will", "be", "disabled", "during", "shifting", "of", "the", "enable", "shift", "masks", "and", "enabled", "during", "shifting", "disable", "shift", "mask", ".", "double_column_correction", ":", "str", "bool", "list", "tuple", "Enables", "double", "column", "PlsrDAC", "correction", ".", "If", "value", "is", "a", "filename", "(", "string", ")", "or", "list", "/", "tuple", "the", "default", "PlsrDAC", "correction", "will", "be", "overwritten", ".", "First", "line", "of", "the", "file", "must", "be", "a", "Python", "list", "(", "[", "0", "0", "...", "]", ")" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L913-L1252
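The scan_loop record above documents a fairly involved API, so a short, hedged usage sketch follows. It is not taken from the repository: it assumes the call happens inside a pyBAR scan routine (hence `self`), and the injection command built from "CAL", "zeros" and "LV1" as well as every argument value are illustrative assumptions based only on the docstring in this record.

# Hedged sketch of a typical scan_loop call (not part of the record above).
# The "CAL"/"LV1" command names and all parameter values are assumptions.
cal_lvl1_command = self.register.get_commands("CAL")[0] + \
    self.register.get_commands("zeros", length=40)[0] + \
    self.register.get_commands("LV1")[0]

self.scan_loop(cal_lvl1_command,
               repeat_command=100,           # 100 injections per pixel and mask step
               use_delay=True,               # append zeros so the FE is not flooded with commands
               mask_steps=3,                 # shift the enable masks in 3 steps
               enable_mask_steps=None,       # None selects all mask steps
               enable_double_columns=None,   # None selects all 40 double columns
               same_mask_for_all_dc=True,    # same mask everywhere: much faster configuration
               digital_injection=False,      # analog injection via C_High/C_Low
               enable_shift_masks=["Enable", "C_High", "C_Low"],
               restore_shift_masks=True)     # write the original pixel configuration back afterwards

Whether scan_loop is reached as self.scan_loop or through another object is not visible in this record, so the attribute access above is also an assumption.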
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
FEI4RegisterUtils.reset_service_records
def reset_service_records(self):
        '''Resetting Service Records

        This will reset Service Record counters. This will also bring back alive some FE where the output FIFO is stuck (no data is coming out in run mode).
        This should be only issued after power up and in the case of a stuck FIFO, otherwise the BCID counter starts jumping.
        '''
        logging.info('Resetting Service Records')
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        self.register.set_global_register_value('ReadErrorReq', 1)
        commands.extend(self.register.get_commands("WrRegister", name=['ReadErrorReq']))
        commands.extend(self.register.get_commands("GlobalPulse", Width=0))
        self.register.set_global_register_value('ReadErrorReq', 0)
        commands.extend(self.register.get_commands("WrRegister", name=['ReadErrorReq']))
        commands.extend(self.register.get_commands("RunMode"))
        commands.extend(self.register.get_commands("ConfMode"))
        self.send_commands(commands)
python
def reset_service_records(self):
        '''Resetting Service Records

        This will reset Service Record counters. This will also bring back alive some FE where the output FIFO is stuck (no data is coming out in run mode).
        This should be only issued after power up and in the case of a stuck FIFO, otherwise the BCID counter starts jumping.
        '''
        logging.info('Resetting Service Records')
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        self.register.set_global_register_value('ReadErrorReq', 1)
        commands.extend(self.register.get_commands("WrRegister", name=['ReadErrorReq']))
        commands.extend(self.register.get_commands("GlobalPulse", Width=0))
        self.register.set_global_register_value('ReadErrorReq', 0)
        commands.extend(self.register.get_commands("WrRegister", name=['ReadErrorReq']))
        commands.extend(self.register.get_commands("RunMode"))
        commands.extend(self.register.get_commands("ConfMode"))
        self.send_commands(commands)
[ "def", "reset_service_records", "(", "self", ")", ":", "logging", ".", "info", "(", "'Resetting Service Records'", ")", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "self", ".", "register", ".", "set_global_register_value", "(", "'ReadErrorReq'", ",", "1", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "'ReadErrorReq'", "]", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"GlobalPulse\"", ",", "Width", "=", "0", ")", ")", "self", ".", "register", ".", "set_global_register_value", "(", "'ReadErrorReq'", ",", "0", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"WrRegister\"", ",", "name", "=", "[", "'ReadErrorReq'", "]", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"RunMode\"", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "self", ".", "send_commands", "(", "commands", ")" ]
Resetting Service Records

        This will reset Service Record counters. This will also bring back alive some FE where the output FIFO is stuck (no data is coming out in run mode).
        This should be only issued after power up and in the case of a stuck FIFO, otherwise the BCID counter starts jumping.
[ "Resetting", "Service", "Records", "This", "will", "reset", "Service", "Record", "counters", ".", "This", "will", "also", "bring", "back", "alive", "some", "FE", "where", "the", "output", "FIFO", "is", "stuck", "(", "no", "data", "is", "coming", "out", "in", "run", "mode", ")", ".", "This", "should", "be", "only", "issued", "after", "power", "up", "and", "in", "the", "case", "of", "a", "stuck", "FIFO", "otherwise", "the", "BCID", "counter", "starts", "jumping", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L164-L180
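As a usage note for the record above: reset_service_records takes no arguments, so calling it is a one-liner. The sketch below is illustrative only; `register_utils` is an assumed, already constructed FEI4RegisterUtils instance and the surrounding readout check is invented pseudologic, not pyBAR API.

# Hedged sketch: recover a front end whose output FIFO appears stuck (no data in run mode).
# `register_utils` and `got_data` are assumed names, not part of the record above.
if not got_data:                             # e.g. a flag maintained by the readout loop
    register_utils.reset_service_records()   # per the docstring: only after power up or with a stuck FIFO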
SiLab-Bonn/pyBAR
pybar/fei4/register_utils.py
FEI4RegisterUtils.reset_bunch_counter
def reset_bunch_counter(self):
        '''Resetting Bunch Counter
        '''
        logging.info('Resetting Bunch Counter')
        commands = []
        commands.extend(self.register.get_commands("RunMode"))
        commands.extend(self.register.get_commands("BCR"))
        self.send_commands(commands)
        time.sleep(0.1)
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        self.send_commands(commands)
python
def reset_bunch_counter(self): '''Resetting Bunch Counter ''' logging.info('Resetting Bunch Counter') commands = [] commands.extend(self.register.get_commands("RunMode")) commands.extend(self.register.get_commands("BCR")) self.send_commands(commands) time.sleep(0.1) commands = [] commands.extend(self.register.get_commands("ConfMode")) self.send_commands(commands)
[ "def", "reset_bunch_counter", "(", "self", ")", ":", "logging", ".", "info", "(", "'Resetting Bunch Counter'", ")", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"RunMode\"", ")", ")", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"BCR\"", ")", ")", "self", ".", "send_commands", "(", "commands", ")", "time", ".", "sleep", "(", "0.1", ")", "commands", "=", "[", "]", "commands", ".", "extend", "(", "self", ".", "register", ".", "get_commands", "(", "\"ConfMode\"", ")", ")", "self", ".", "send_commands", "(", "commands", ")" ]
Resetting Bunch Counter
[ "Resetting", "Bunch", "Counter" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L182-L193
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
generate_threshold_mask
def generate_threshold_mask(hist): '''Masking array elements when equal 0.0 or greater than 10 times the median Parameters ---------- hist : array_like Input data. Returns ------- masked array Returns copy of the array with masked elements. ''' masked_array = np.ma.masked_values(hist, 0) masked_array = np.ma.masked_greater(masked_array, 10 * np.ma.median(hist)) logging.info('Masking %d pixel(s)', np.ma.count_masked(masked_array)) return np.ma.getmaskarray(masked_array)
python
def generate_threshold_mask(hist): '''Masking array elements when equal 0.0 or greater than 10 times the median Parameters ---------- hist : array_like Input data. Returns ------- masked array Returns copy of the array with masked elements. ''' masked_array = np.ma.masked_values(hist, 0) masked_array = np.ma.masked_greater(masked_array, 10 * np.ma.median(hist)) logging.info('Masking %d pixel(s)', np.ma.count_masked(masked_array)) return np.ma.getmaskarray(masked_array)
[ "def", "generate_threshold_mask", "(", "hist", ")", ":", "masked_array", "=", "np", ".", "ma", ".", "masked_values", "(", "hist", ",", "0", ")", "masked_array", "=", "np", ".", "ma", ".", "masked_greater", "(", "masked_array", ",", "10", "*", "np", ".", "ma", ".", "median", "(", "hist", ")", ")", "logging", ".", "info", "(", "'Masking %d pixel(s)'", ",", "np", ".", "ma", ".", "count_masked", "(", "masked_array", ")", ")", "return", "np", ".", "ma", ".", "getmaskarray", "(", "masked_array", ")" ]
Masking array elements when equal 0.0 or greater than 10 times the median Parameters ---------- hist : array_like Input data. Returns ------- masked array Returns copy of the array with masked elements.
[ "Masking", "array", "elements", "when", "equal", "0", ".", "0", "or", "greater", "than", "10", "times", "the", "median" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L50-L66
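The masking logic above is plain NumPy, so it can be sanity-checked outside pyBAR. A minimal standalone sketch with a made-up occupancy histogram (the values are invented and not part of the record above):

import numpy as np

# toy histogram: one empty pixel and one noisy outlier among ordinary entries
hist = np.array([0., 5., 6., 4., 100., 5.])

masked = np.ma.masked_values(hist, 0)                            # mask entries equal to 0.0
masked = np.ma.masked_greater(masked, 10 * np.ma.median(hist))   # mask entries above 10x the median
threshold_mask = np.ma.getmaskarray(masked)
print(threshold_mask)   # [ True False False False  True False]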
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
unique_row
def unique_row(array, use_columns=None, selected_columns_only=False): '''Takes a numpy array and returns the array reduced to unique rows. If columns are defined only these columns are taken to define a unique row. The returned array can have all columns of the original array or only the columns defined in use_columns. Parameters ---------- array : numpy.ndarray use_columns : list Index of columns to be used to define a unique row selected_columns_only : bool If true only the columns defined in use_columns are returned Returns ------- numpy.ndarray ''' if array.dtype.names is None: # normal array has no named dtype if use_columns is not None: a_cut = array[:, use_columns] else: a_cut = array if len(use_columns) > 1: b = np.ascontiguousarray(a_cut).view(np.dtype((np.void, a_cut.dtype.itemsize * a_cut.shape[1]))) else: b = np.ascontiguousarray(a_cut) _, index = np.unique(b, return_index=True) if not selected_columns_only: return array[np.sort(index)] # sort to preserve order else: return a_cut[np.sort(index)] # sort to preserve order else: # names for dtype founnd --> array is recarray names = list(array.dtype.names) if use_columns is not None: new_names = [names[i] for i in use_columns] else: new_names = names a_cut, index = np.unique(array[new_names], return_index=True) if not selected_columns_only: return array[np.sort(index)] # sort to preserve order else: return array[np.sort(index)][new_names]
python
def unique_row(array, use_columns=None, selected_columns_only=False): '''Takes a numpy array and returns the array reduced to unique rows. If columns are defined only these columns are taken to define a unique row. The returned array can have all columns of the original array or only the columns defined in use_columns. Parameters ---------- array : numpy.ndarray use_columns : list Index of columns to be used to define a unique row selected_columns_only : bool If true only the columns defined in use_columns are returned Returns ------- numpy.ndarray ''' if array.dtype.names is None: # normal array has no named dtype if use_columns is not None: a_cut = array[:, use_columns] else: a_cut = array if len(use_columns) > 1: b = np.ascontiguousarray(a_cut).view(np.dtype((np.void, a_cut.dtype.itemsize * a_cut.shape[1]))) else: b = np.ascontiguousarray(a_cut) _, index = np.unique(b, return_index=True) if not selected_columns_only: return array[np.sort(index)] # sort to preserve order else: return a_cut[np.sort(index)] # sort to preserve order else: # names for dtype founnd --> array is recarray names = list(array.dtype.names) if use_columns is not None: new_names = [names[i] for i in use_columns] else: new_names = names a_cut, index = np.unique(array[new_names], return_index=True) if not selected_columns_only: return array[np.sort(index)] # sort to preserve order else: return array[np.sort(index)][new_names]
[ "def", "unique_row", "(", "array", ",", "use_columns", "=", "None", ",", "selected_columns_only", "=", "False", ")", ":", "if", "array", ".", "dtype", ".", "names", "is", "None", ":", "# normal array has no named dtype", "if", "use_columns", "is", "not", "None", ":", "a_cut", "=", "array", "[", ":", ",", "use_columns", "]", "else", ":", "a_cut", "=", "array", "if", "len", "(", "use_columns", ")", ">", "1", ":", "b", "=", "np", ".", "ascontiguousarray", "(", "a_cut", ")", ".", "view", "(", "np", ".", "dtype", "(", "(", "np", ".", "void", ",", "a_cut", ".", "dtype", ".", "itemsize", "*", "a_cut", ".", "shape", "[", "1", "]", ")", ")", ")", "else", ":", "b", "=", "np", ".", "ascontiguousarray", "(", "a_cut", ")", "_", ",", "index", "=", "np", ".", "unique", "(", "b", ",", "return_index", "=", "True", ")", "if", "not", "selected_columns_only", ":", "return", "array", "[", "np", ".", "sort", "(", "index", ")", "]", "# sort to preserve order", "else", ":", "return", "a_cut", "[", "np", ".", "sort", "(", "index", ")", "]", "# sort to preserve order", "else", ":", "# names for dtype founnd --> array is recarray", "names", "=", "list", "(", "array", ".", "dtype", ".", "names", ")", "if", "use_columns", "is", "not", "None", ":", "new_names", "=", "[", "names", "[", "i", "]", "for", "i", "in", "use_columns", "]", "else", ":", "new_names", "=", "names", "a_cut", ",", "index", "=", "np", ".", "unique", "(", "array", "[", "new_names", "]", ",", "return_index", "=", "True", ")", "if", "not", "selected_columns_only", ":", "return", "array", "[", "np", ".", "sort", "(", "index", ")", "]", "# sort to preserve order", "else", ":", "return", "array", "[", "np", ".", "sort", "(", "index", ")", "]", "[", "new_names", "]" ]
Takes a numpy array and returns the array reduced to unique rows. If columns are defined only these columns are taken to define a unique row. The returned array can have all columns of the original array or only the columns defined in use_columns. Parameters ---------- array : numpy.ndarray use_columns : list Index of columns to be used to define a unique row selected_columns_only : bool If true only the columns defined in use_columns are returned Returns ------- numpy.ndarray
[ "Takes", "a", "numpy", "array", "and", "returns", "the", "array", "reduced", "to", "unique", "rows", ".", "If", "columns", "are", "defined", "only", "these", "columns", "are", "taken", "to", "define", "a", "unique", "row", ".", "The", "returned", "array", "can", "have", "all", "columns", "of", "the", "original", "array", "or", "only", "the", "columns", "defined", "in", "use_columns", ".", "Parameters", "----------", "array", ":", "numpy", ".", "ndarray", "use_columns", ":", "list", "Index", "of", "columns", "to", "be", "used", "to", "define", "a", "unique", "row", "selected_columns_only", ":", "bool", "If", "true", "only", "the", "columns", "defined", "in", "use_columns", "are", "returned" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L69-L108
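For the plain-array branch above, the rows are made comparable for np.unique by viewing the selected columns as a single opaque (void) value per row. A short sketch with made-up data, keeping the first occurrence of every (column 0, column 1) pair:

import numpy as np

a = np.array([[1, 2, 9],
              [1, 2, 7],
              [3, 4, 5],
              [1, 2, 6]])
cols = [0, 1]                                   # columns that define a unique row
a_cut = a[:, cols]

# view each selected row as one void value, then deduplicate while preserving order
b = np.ascontiguousarray(a_cut).view(np.dtype((np.void, a_cut.dtype.itemsize * a_cut.shape[1])))
_, index = np.unique(b, return_index=True)
print(a[np.sort(index)])                        # [[1 2 9]
                                                #  [3 4 5]]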
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_ranges_from_array
def get_ranges_from_array(arr, append_last=True): '''Takes an array and calculates ranges [start, stop[. The last range end is none to keep the same length. Parameters ---------- arr : array like append_last: bool If True, append item with a pair of last array item and None. Returns ------- numpy.array The array formed by pairs of values by the given array. Example ------- >>> a = np.array((1,2,3,4)) >>> get_ranges_from_array(a, append_last=True) array([[1, 2], [2, 3], [3, 4], [4, None]]) >>> get_ranges_from_array(a, append_last=False) array([[1, 2], [2, 3], [3, 4]]) ''' right = arr[1:] if append_last: left = arr[:] right = np.append(right, None) else: left = arr[:-1] return np.column_stack((left, right))
python
def get_ranges_from_array(arr, append_last=True): '''Takes an array and calculates ranges [start, stop[. The last range end is none to keep the same length. Parameters ---------- arr : array like append_last: bool If True, append item with a pair of last array item and None. Returns ------- numpy.array The array formed by pairs of values by the given array. Example ------- >>> a = np.array((1,2,3,4)) >>> get_ranges_from_array(a, append_last=True) array([[1, 2], [2, 3], [3, 4], [4, None]]) >>> get_ranges_from_array(a, append_last=False) array([[1, 2], [2, 3], [3, 4]]) ''' right = arr[1:] if append_last: left = arr[:] right = np.append(right, None) else: left = arr[:-1] return np.column_stack((left, right))
[ "def", "get_ranges_from_array", "(", "arr", ",", "append_last", "=", "True", ")", ":", "right", "=", "arr", "[", "1", ":", "]", "if", "append_last", ":", "left", "=", "arr", "[", ":", "]", "right", "=", "np", ".", "append", "(", "right", ",", "None", ")", "else", ":", "left", "=", "arr", "[", ":", "-", "1", "]", "return", "np", ".", "column_stack", "(", "(", "left", ",", "right", ")", ")" ]
Takes an array and calculates ranges [start, stop[. The last range end is none to keep the same length. Parameters ---------- arr : array like append_last: bool If True, append item with a pair of last array item and None. Returns ------- numpy.array The array formed by pairs of values by the given array. Example ------- >>> a = np.array((1,2,3,4)) >>> get_ranges_from_array(a, append_last=True) array([[1, 2], [2, 3], [3, 4], [4, None]]) >>> get_ranges_from_array(a, append_last=False) array([[1, 2], [2, 3], [3, 4]])
[ "Takes", "an", "array", "and", "calculates", "ranges", "[", "start", "stop", "[", ".", "The", "last", "range", "end", "is", "none", "to", "keep", "the", "same", "length", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L111-L144
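One reason the last pair ends in None (append_last=True) is that the pairs can be used directly as slice bounds, where a stop of None means "to the end". A small sketch with invented index values:

import numpy as np

indices = np.array([0, 3, 7])
data = np.arange(10)

ranges = np.column_stack((indices, np.append(indices[1:], None)))   # [[0 3] [3 7] [7 None]]
for start, stop in ranges:
    print(data[start:stop])   # the last slice runs to the end of data because stop is None
# [0 1 2]
# [3 4 5 6]
# [7 8 9]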
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
in1d_sorted
def in1d_sorted(ar1, ar2): """ Does the same as np.in1d but uses the fact that ar1 and ar2 are sorted. Is therefore much faster. """ if ar1.shape[0] == 0 or ar2.shape[0] == 0: # check for empty arrays to avoid crash return [] inds = ar2.searchsorted(ar1) inds[inds == len(ar2)] = 0 return ar2[inds] == ar1
python
def in1d_sorted(ar1, ar2): """ Does the same as np.in1d but uses the fact that ar1 and ar2 are sorted. Is therefore much faster. """ if ar1.shape[0] == 0 or ar2.shape[0] == 0: # check for empty arrays to avoid crash return [] inds = ar2.searchsorted(ar1) inds[inds == len(ar2)] = 0 return ar2[inds] == ar1
[ "def", "in1d_sorted", "(", "ar1", ",", "ar2", ")", ":", "if", "ar1", ".", "shape", "[", "0", "]", "==", "0", "or", "ar2", ".", "shape", "[", "0", "]", "==", "0", ":", "# check for empty arrays to avoid crash", "return", "[", "]", "inds", "=", "ar2", ".", "searchsorted", "(", "ar1", ")", "inds", "[", "inds", "==", "len", "(", "ar2", ")", "]", "=", "0", "return", "ar2", "[", "inds", "]", "==", "ar1" ]
Does the same as np.in1d but uses the fact that ar1 and ar2 are sorted. Is therefore much faster.
[ "Does", "the", "same", "than", "np", ".", "in1d", "but", "uses", "the", "fact", "that", "ar1", "and", "ar2", "are", "sorted", ".", "Is", "therefore", "much", "faster", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L159-L168
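A standalone sketch of the searchsorted trick with small made-up arrays; np.isin (NumPy's generic membership test) is shown only as a cross-check:

import numpy as np

ar1 = np.array([1, 3, 5, 7])   # sorted query values
ar2 = np.array([3, 4, 5, 9])   # sorted reference values

inds = ar2.searchsorted(ar1)   # candidate position of every ar1 value inside ar2
inds[inds == len(ar2)] = 0     # clamp overflowing indices so the lookup below stays in bounds
print(ar2[inds] == ar1)        # [False  True  True False]
print(np.isin(ar1, ar2))       # same result from the generic (slower) implementation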
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
central_difference
def central_difference(x, y): '''Returns the dy/dx(x) via central difference method Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like ''' if (len(x) != len(y)): raise ValueError("x, y must have the same length") z1 = np.hstack((y[0], y[:-1])) z2 = np.hstack((y[1:], y[-1])) dx1 = np.hstack((0, np.diff(x))) dx2 = np.hstack((np.diff(x), 0)) return (z2 - z1) / (dx2 + dx1)
python
def central_difference(x, y): '''Returns the dy/dx(x) via central difference method Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like ''' if (len(x) != len(y)): raise ValueError("x, y must have the same length") z1 = np.hstack((y[0], y[:-1])) z2 = np.hstack((y[1:], y[-1])) dx1 = np.hstack((0, np.diff(x))) dx2 = np.hstack((np.diff(x), 0)) return (z2 - z1) / (dx2 + dx1)
[ "def", "central_difference", "(", "x", ",", "y", ")", ":", "if", "(", "len", "(", "x", ")", "!=", "len", "(", "y", ")", ")", ":", "raise", "ValueError", "(", "\"x, y must have the same length\"", ")", "z1", "=", "np", ".", "hstack", "(", "(", "y", "[", "0", "]", ",", "y", "[", ":", "-", "1", "]", ")", ")", "z2", "=", "np", ".", "hstack", "(", "(", "y", "[", "1", ":", "]", ",", "y", "[", "-", "1", "]", ")", ")", "dx1", "=", "np", ".", "hstack", "(", "(", "0", ",", "np", ".", "diff", "(", "x", ")", ")", ")", "dx2", "=", "np", ".", "hstack", "(", "(", "np", ".", "diff", "(", "x", ")", ",", "0", ")", ")", "return", "(", "z2", "-", "z1", ")", "/", "(", "dx2", "+", "dx1", ")" ]
Returns the dy/dx(x) via central difference method Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like
[ "Returns", "the", "dy", "/", "dx", "(", "x", ")", "via", "central", "difference", "method" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L171-L189
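The formula reduces to a forward/backward difference at the two edges and a central difference in between, which also works on a non-uniform grid. A standalone sketch with invented sample points:

import numpy as np

x = np.array([0.0, 1.0, 2.5, 4.0])   # non-uniform grid
y = x ** 2

z1 = np.hstack((y[0], y[:-1]))
z2 = np.hstack((y[1:], y[-1]))
dx1 = np.hstack((0, np.diff(x)))
dx2 = np.hstack((np.diff(x), 0))
print((z2 - z1) / (dx2 + dx1))   # [1.  2.5 5.  6.5] -- approximates dy/dx = 2x, one-sided at the ends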
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_profile_histogram
def get_profile_histogram(x, y, n_bins=100): '''Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates the y mean for every bin at the bin center and gives the y mean error as error bars. Parameters ---------- x : array like data x positions y : array like data y positions n_bins : int the number of bins used to create the histogram ''' if len(x) != len(y): raise ValueError('x and y dimensions have to be the same') y = y.astype(np.float32) n, bin_edges = np.histogram(x, bins=n_bins) # needed to calculate the number of points per bin sy = np.histogram(x, bins=n_bins, weights=y)[0] # the sum of the bin values sy2 = np.histogram(x, bins=n_bins, weights=y * y)[0] # the quadratic sum of the bin values bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2 # calculate the bin center for all bins mean = sy / n # calculate the mean of all bins std = np.sqrt((sy2 / n - mean * mean)) # TODO: not understood, need check if this is really the standard deviation std_mean = std / np.sqrt((n - 1)) mean[np.isnan(mean)] = 0. std_mean[np.isnan(std_mean)] = 0. return bin_centers, mean, std_mean
python
def get_profile_histogram(x, y, n_bins=100): '''Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates the y mean for every bin at the bin center and gives the y mean error as error bars. Parameters ---------- x : array like data x positions y : array like data y positions n_bins : int the number of bins used to create the histogram ''' if len(x) != len(y): raise ValueError('x and y dimensions have to be the same') y = y.astype(np.float32) n, bin_edges = np.histogram(x, bins=n_bins) # needed to calculate the number of points per bin sy = np.histogram(x, bins=n_bins, weights=y)[0] # the sum of the bin values sy2 = np.histogram(x, bins=n_bins, weights=y * y)[0] # the quadratic sum of the bin values bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2 # calculate the bin center for all bins mean = sy / n # calculate the mean of all bins std = np.sqrt((sy2 / n - mean * mean)) # TODO: not understood, need check if this is really the standard deviation std_mean = std / np.sqrt((n - 1)) mean[np.isnan(mean)] = 0. std_mean[np.isnan(std_mean)] = 0. return bin_centers, mean, std_mean
[ "def", "get_profile_histogram", "(", "x", ",", "y", ",", "n_bins", "=", "100", ")", ":", "if", "len", "(", "x", ")", "!=", "len", "(", "y", ")", ":", "raise", "ValueError", "(", "'x and y dimensions have to be the same'", ")", "y", "=", "y", ".", "astype", "(", "np", ".", "float32", ")", "n", ",", "bin_edges", "=", "np", ".", "histogram", "(", "x", ",", "bins", "=", "n_bins", ")", "# needed to calculate the number of points per bin", "sy", "=", "np", ".", "histogram", "(", "x", ",", "bins", "=", "n_bins", ",", "weights", "=", "y", ")", "[", "0", "]", "# the sum of the bin values", "sy2", "=", "np", ".", "histogram", "(", "x", ",", "bins", "=", "n_bins", ",", "weights", "=", "y", "*", "y", ")", "[", "0", "]", "# the quadratic sum of the bin values", "bin_centers", "=", "(", "bin_edges", "[", "1", ":", "]", "+", "bin_edges", "[", ":", "-", "1", "]", ")", "/", "2", "# calculate the bin center for all bins", "mean", "=", "sy", "/", "n", "# calculate the mean of all bins", "std", "=", "np", ".", "sqrt", "(", "(", "sy2", "/", "n", "-", "mean", "*", "mean", ")", ")", "# TODO: not understood, need check if this is really the standard deviation", "std_mean", "=", "std", "/", "np", ".", "sqrt", "(", "(", "n", "-", "1", ")", ")", "mean", "[", "np", ".", "isnan", "(", "mean", ")", "]", "=", "0.", "std_mean", "[", "np", ".", "isnan", "(", "std_mean", ")", "]", "=", "0.", "return", "bin_centers", ",", "mean", ",", "std_mean" ]
Takes 2D point data (x,y) and creates a profile histogram similar to the TProfile in ROOT. It calculates the y mean for every bin at the bin center and gives the y mean error as error bars. Parameters ---------- x : array like data x positions y : array like data y positions n_bins : int the number of bins used to create the histogram
[ "Takes", "2D", "point", "data", "(", "x", "y", ")", "and", "creates", "a", "profile", "histogram", "similar", "to", "the", "TProfile", "in", "ROOT", ".", "It", "calculates", "the", "y", "mean", "for", "every", "bin", "at", "the", "bin", "center", "and", "gives", "the", "y", "mean", "error", "as", "error", "bars", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L192-L217
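A standalone sketch of the same binned-mean computation with synthetic data (written for Python 3 and current NumPy, unlike the Python 2 style of the module itself); the underlying relation y = 2x should be recovered per bin:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 10000)
y = 2 * x + rng.normal(0, 1, 10000)

n_bins = 5
n, bin_edges = np.histogram(x, bins=n_bins)           # entries per bin
sy = np.histogram(x, bins=n_bins, weights=y)[0]       # sum of y per bin
sy2 = np.histogram(x, bins=n_bins, weights=y * y)[0]  # sum of y^2 per bin
bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2
mean = sy / n
std_mean = np.sqrt(sy2 / n - mean ** 2) / np.sqrt(n - 1)
print(bin_centers)   # roughly [1, 3, 5, 7, 9]
print(mean)          # roughly 2 * bin_centers
print(std_mean)      # statistical error of each bin mean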
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_rate_normalization
def get_rate_normalization(hit_file, parameter, reference='event', cluster_file=None, plot=False, chunk_size=500000): ''' Takes different hit files (hit_files), extracts the number of events or the scan time (reference) per scan parameter (parameter) and returns an array with a normalization factor. This normalization factor has the length of the number of different parameters. If a cluster_file is specified also the number of cluster per event are used to create the normalization factor. Parameters ---------- hit_files : string parameter : string reference : string plot : bool Returns ------- numpy.ndarray ''' logging.info('Calculate the rate normalization') with tb.open_file(hit_file, mode="r+") as in_hit_file_h5: # open the hit file meta_data = in_hit_file_h5.root.meta_data[:] scan_parameter = get_scan_parameter(meta_data)[parameter] event_numbers = get_meta_data_at_scan_parameter(meta_data, parameter)['event_number'] # get the event numbers in meta_data where the scan parameter changes event_range = get_ranges_from_array(event_numbers) normalization_rate = [] normalization_multiplicity = [] try: event_range[-1, 1] = in_hit_file_h5.root.Hits[-1]['event_number'] + 1 except tb.NoSuchNodeError: logging.error('Cannot find hits table') return # calculate rate normalization from the event rate for triggered data / measurement time for self triggered data for each scan parameter if reference == 'event': n_events = event_range[:, 1] - event_range[:, 0] # number of events for every parameter setting normalization_rate.extend(n_events) elif reference == 'time': time_start = get_meta_data_at_scan_parameter(meta_data, parameter)['timestamp_start'] time_spend = np.diff(time_start) time_spend = np.append(time_spend, meta_data[-1]['timestamp_stop'] - time_start[-1]) # TODO: needs check, add last missing entry normalization_rate.extend(time_spend) else: raise NotImplementedError('The normalization reference ' + reference + ' is not implemented') if cluster_file: # calculate the rate normalization from the mean number of hits per event per scan parameter, needed for beam data since a beam since the multiplicity is rarely constant cluster_table = in_hit_file_h5.root.Cluster index_event_number(cluster_table) index = 0 # index where to start the read out, 0 at the beginning, increased during looping, variable for read speed up best_chunk_size = chunk_size # variable for read speed up total_cluster = 0 progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_table.shape[0], term_width=80) progress_bar.start() for start_event, stop_event in event_range: # loop over the selected events readout_cluster_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up n_cluster_per_event = None for clusters, index in data_aligned_at_events(cluster_table, start_event_number=start_event, stop_event_number=stop_event, start_index=index, chunk_size=best_chunk_size): if n_cluster_per_event is None: n_cluster_per_event = analysis_utils.get_n_cluster_in_events(clusters['event_number'])[:, 1] # array with the number of cluster per event, cluster per event are at least 1 else: n_cluster_per_event = np.append(n_cluster_per_event, analysis_utils.get_n_cluster_in_events(clusters['event_number'])[:, 1]) readout_cluster_len += clusters.shape[0] total_cluster += clusters.shape[0] progress_bar.update(index) best_chunk_size = int(1.5 * readout_cluster_len) if int(1.05 * 
readout_cluster_len) < chunk_size else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction normalization_multiplicity.append(np.mean(n_cluster_per_event)) progress_bar.finish() if total_cluster != cluster_table.shape[0]: logging.warning('Analysis shows inconsistent number of cluster (%d != %d). Check needed!', total_cluster, cluster_table.shape[0]) if plot: x = scan_parameter if reference == 'event': plotting.plot_scatter(x, normalization_rate, title='Events per ' + parameter + ' setting', x_label=parameter, y_label='# events', log_x=True, filename=os.path.splitext(hit_file)[0] + '_n_event_normalization.pdf') elif reference == 'time': plotting.plot_scatter(x, normalization_rate, title='Measuring time per GDAC setting', x_label=parameter, y_label='time [s]', log_x=True, filename=os.path.splitext(hit_file)[0] + '_time_normalization.pdf') if cluster_file: plotting.plot_scatter(x, normalization_multiplicity, title='Mean number of particles per event', x_label=parameter, y_label='number of hits per event', log_x=True, filename=os.path.splitext(hit_file)[0] + '_n_particles_normalization.pdf') if cluster_file: normalization_rate = np.array(normalization_rate) normalization_multiplicity = np.array(normalization_multiplicity) return np.amax(normalization_rate * normalization_multiplicity).astype('f16') / (normalization_rate * normalization_multiplicity) return np.amax(np.array(normalization_rate)).astype('f16') / np.array(normalization_rate)
python
def get_rate_normalization(hit_file, parameter, reference='event', cluster_file=None, plot=False, chunk_size=500000): ''' Takes different hit files (hit_files), extracts the number of events or the scan time (reference) per scan parameter (parameter) and returns an array with a normalization factor. This normalization factor has the length of the number of different parameters. If a cluster_file is specified also the number of cluster per event are used to create the normalization factor. Parameters ---------- hit_files : string parameter : string reference : string plot : bool Returns ------- numpy.ndarray ''' logging.info('Calculate the rate normalization') with tb.open_file(hit_file, mode="r+") as in_hit_file_h5: # open the hit file meta_data = in_hit_file_h5.root.meta_data[:] scan_parameter = get_scan_parameter(meta_data)[parameter] event_numbers = get_meta_data_at_scan_parameter(meta_data, parameter)['event_number'] # get the event numbers in meta_data where the scan parameter changes event_range = get_ranges_from_array(event_numbers) normalization_rate = [] normalization_multiplicity = [] try: event_range[-1, 1] = in_hit_file_h5.root.Hits[-1]['event_number'] + 1 except tb.NoSuchNodeError: logging.error('Cannot find hits table') return # calculate rate normalization from the event rate for triggered data / measurement time for self triggered data for each scan parameter if reference == 'event': n_events = event_range[:, 1] - event_range[:, 0] # number of events for every parameter setting normalization_rate.extend(n_events) elif reference == 'time': time_start = get_meta_data_at_scan_parameter(meta_data, parameter)['timestamp_start'] time_spend = np.diff(time_start) time_spend = np.append(time_spend, meta_data[-1]['timestamp_stop'] - time_start[-1]) # TODO: needs check, add last missing entry normalization_rate.extend(time_spend) else: raise NotImplementedError('The normalization reference ' + reference + ' is not implemented') if cluster_file: # calculate the rate normalization from the mean number of hits per event per scan parameter, needed for beam data since a beam since the multiplicity is rarely constant cluster_table = in_hit_file_h5.root.Cluster index_event_number(cluster_table) index = 0 # index where to start the read out, 0 at the beginning, increased during looping, variable for read speed up best_chunk_size = chunk_size # variable for read speed up total_cluster = 0 progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_table.shape[0], term_width=80) progress_bar.start() for start_event, stop_event in event_range: # loop over the selected events readout_cluster_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up n_cluster_per_event = None for clusters, index in data_aligned_at_events(cluster_table, start_event_number=start_event, stop_event_number=stop_event, start_index=index, chunk_size=best_chunk_size): if n_cluster_per_event is None: n_cluster_per_event = analysis_utils.get_n_cluster_in_events(clusters['event_number'])[:, 1] # array with the number of cluster per event, cluster per event are at least 1 else: n_cluster_per_event = np.append(n_cluster_per_event, analysis_utils.get_n_cluster_in_events(clusters['event_number'])[:, 1]) readout_cluster_len += clusters.shape[0] total_cluster += clusters.shape[0] progress_bar.update(index) best_chunk_size = int(1.5 * readout_cluster_len) if int(1.05 * 
readout_cluster_len) < chunk_size else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction normalization_multiplicity.append(np.mean(n_cluster_per_event)) progress_bar.finish() if total_cluster != cluster_table.shape[0]: logging.warning('Analysis shows inconsistent number of cluster (%d != %d). Check needed!', total_cluster, cluster_table.shape[0]) if plot: x = scan_parameter if reference == 'event': plotting.plot_scatter(x, normalization_rate, title='Events per ' + parameter + ' setting', x_label=parameter, y_label='# events', log_x=True, filename=os.path.splitext(hit_file)[0] + '_n_event_normalization.pdf') elif reference == 'time': plotting.plot_scatter(x, normalization_rate, title='Measuring time per GDAC setting', x_label=parameter, y_label='time [s]', log_x=True, filename=os.path.splitext(hit_file)[0] + '_time_normalization.pdf') if cluster_file: plotting.plot_scatter(x, normalization_multiplicity, title='Mean number of particles per event', x_label=parameter, y_label='number of hits per event', log_x=True, filename=os.path.splitext(hit_file)[0] + '_n_particles_normalization.pdf') if cluster_file: normalization_rate = np.array(normalization_rate) normalization_multiplicity = np.array(normalization_multiplicity) return np.amax(normalization_rate * normalization_multiplicity).astype('f16') / (normalization_rate * normalization_multiplicity) return np.amax(np.array(normalization_rate)).astype('f16') / np.array(normalization_rate)
[ "def", "get_rate_normalization", "(", "hit_file", ",", "parameter", ",", "reference", "=", "'event'", ",", "cluster_file", "=", "None", ",", "plot", "=", "False", ",", "chunk_size", "=", "500000", ")", ":", "logging", ".", "info", "(", "'Calculate the rate normalization'", ")", "with", "tb", ".", "open_file", "(", "hit_file", ",", "mode", "=", "\"r+\"", ")", "as", "in_hit_file_h5", ":", "# open the hit file", "meta_data", "=", "in_hit_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "scan_parameter", "=", "get_scan_parameter", "(", "meta_data", ")", "[", "parameter", "]", "event_numbers", "=", "get_meta_data_at_scan_parameter", "(", "meta_data", ",", "parameter", ")", "[", "'event_number'", "]", "# get the event numbers in meta_data where the scan parameter changes", "event_range", "=", "get_ranges_from_array", "(", "event_numbers", ")", "normalization_rate", "=", "[", "]", "normalization_multiplicity", "=", "[", "]", "try", ":", "event_range", "[", "-", "1", ",", "1", "]", "=", "in_hit_file_h5", ".", "root", ".", "Hits", "[", "-", "1", "]", "[", "'event_number'", "]", "+", "1", "except", "tb", ".", "NoSuchNodeError", ":", "logging", ".", "error", "(", "'Cannot find hits table'", ")", "return", "# calculate rate normalization from the event rate for triggered data / measurement time for self triggered data for each scan parameter", "if", "reference", "==", "'event'", ":", "n_events", "=", "event_range", "[", ":", ",", "1", "]", "-", "event_range", "[", ":", ",", "0", "]", "# number of events for every parameter setting", "normalization_rate", ".", "extend", "(", "n_events", ")", "elif", "reference", "==", "'time'", ":", "time_start", "=", "get_meta_data_at_scan_parameter", "(", "meta_data", ",", "parameter", ")", "[", "'timestamp_start'", "]", "time_spend", "=", "np", ".", "diff", "(", "time_start", ")", "time_spend", "=", "np", ".", "append", "(", "time_spend", ",", "meta_data", "[", "-", "1", "]", "[", "'timestamp_stop'", "]", "-", "time_start", "[", "-", "1", "]", ")", "# TODO: needs check, add last missing entry", "normalization_rate", ".", "extend", "(", "time_spend", ")", "else", ":", "raise", "NotImplementedError", "(", "'The normalization reference '", "+", "reference", "+", "' is not implemented'", ")", "if", "cluster_file", ":", "# calculate the rate normalization from the mean number of hits per event per scan parameter, needed for beam data since a beam since the multiplicity is rarely constant", "cluster_table", "=", "in_hit_file_h5", ".", "root", ".", "Cluster", "index_event_number", "(", "cluster_table", ")", "index", "=", "0", "# index where to start the read out, 0 at the beginning, increased during looping, variable for read speed up", "best_chunk_size", "=", "chunk_size", "# variable for read speed up", "total_cluster", "=", "0", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "cluster_table", ".", "shape", "[", "0", "]", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "for", "start_event", ",", "stop_event", "in", "event_range", ":", "# loop over the selected events", "readout_cluster_len", "=", "0", "# variable to calculate a optimal chunk size value from the number of hits for speed up", "n_cluster_per_event", "=", "None", "for", 
"clusters", ",", "index", "in", "data_aligned_at_events", "(", "cluster_table", ",", "start_event_number", "=", "start_event", ",", "stop_event_number", "=", "stop_event", ",", "start_index", "=", "index", ",", "chunk_size", "=", "best_chunk_size", ")", ":", "if", "n_cluster_per_event", "is", "None", ":", "n_cluster_per_event", "=", "analysis_utils", ".", "get_n_cluster_in_events", "(", "clusters", "[", "'event_number'", "]", ")", "[", ":", ",", "1", "]", "# array with the number of cluster per event, cluster per event are at least 1", "else", ":", "n_cluster_per_event", "=", "np", ".", "append", "(", "n_cluster_per_event", ",", "analysis_utils", ".", "get_n_cluster_in_events", "(", "clusters", "[", "'event_number'", "]", ")", "[", ":", ",", "1", "]", ")", "readout_cluster_len", "+=", "clusters", ".", "shape", "[", "0", "]", "total_cluster", "+=", "clusters", ".", "shape", "[", "0", "]", "progress_bar", ".", "update", "(", "index", ")", "best_chunk_size", "=", "int", "(", "1.5", "*", "readout_cluster_len", ")", "if", "int", "(", "1.05", "*", "readout_cluster_len", ")", "<", "chunk_size", "else", "chunk_size", "# to increase the readout speed, estimated the number of hits for one read instruction", "normalization_multiplicity", ".", "append", "(", "np", ".", "mean", "(", "n_cluster_per_event", ")", ")", "progress_bar", ".", "finish", "(", ")", "if", "total_cluster", "!=", "cluster_table", ".", "shape", "[", "0", "]", ":", "logging", ".", "warning", "(", "'Analysis shows inconsistent number of cluster (%d != %d). Check needed!'", ",", "total_cluster", ",", "cluster_table", ".", "shape", "[", "0", "]", ")", "if", "plot", ":", "x", "=", "scan_parameter", "if", "reference", "==", "'event'", ":", "plotting", ".", "plot_scatter", "(", "x", ",", "normalization_rate", ",", "title", "=", "'Events per '", "+", "parameter", "+", "' setting'", ",", "x_label", "=", "parameter", ",", "y_label", "=", "'# events'", ",", "log_x", "=", "True", ",", "filename", "=", "os", ".", "path", ".", "splitext", "(", "hit_file", ")", "[", "0", "]", "+", "'_n_event_normalization.pdf'", ")", "elif", "reference", "==", "'time'", ":", "plotting", ".", "plot_scatter", "(", "x", ",", "normalization_rate", ",", "title", "=", "'Measuring time per GDAC setting'", ",", "x_label", "=", "parameter", ",", "y_label", "=", "'time [s]'", ",", "log_x", "=", "True", ",", "filename", "=", "os", ".", "path", ".", "splitext", "(", "hit_file", ")", "[", "0", "]", "+", "'_time_normalization.pdf'", ")", "if", "cluster_file", ":", "plotting", ".", "plot_scatter", "(", "x", ",", "normalization_multiplicity", ",", "title", "=", "'Mean number of particles per event'", ",", "x_label", "=", "parameter", ",", "y_label", "=", "'number of hits per event'", ",", "log_x", "=", "True", ",", "filename", "=", "os", ".", "path", ".", "splitext", "(", "hit_file", ")", "[", "0", "]", "+", "'_n_particles_normalization.pdf'", ")", "if", "cluster_file", ":", "normalization_rate", "=", "np", ".", "array", "(", "normalization_rate", ")", "normalization_multiplicity", "=", "np", ".", "array", "(", "normalization_multiplicity", ")", "return", "np", ".", "amax", "(", "normalization_rate", "*", "normalization_multiplicity", ")", ".", "astype", "(", "'f16'", ")", "/", "(", "normalization_rate", "*", "normalization_multiplicity", ")", "return", "np", ".", "amax", "(", "np", ".", "array", "(", "normalization_rate", ")", ")", ".", "astype", "(", "'f16'", ")", "/", "np", ".", "array", "(", "normalization_rate", ")" ]
Takes different hit files (hit_files), extracts the number of events or the scan time (reference) per scan parameter (parameter) and returns an array with a normalization factor. This normalization factor has the length of the number of different parameters. If a cluster_file is specified also the number of cluster per event are used to create the normalization factor. Parameters ---------- hit_files : string parameter : string reference : string plot : bool Returns ------- numpy.ndarray
[ "Takes", "different", "hit", "files", "(", "hit_files", ")", "extracts", "the", "number", "of", "events", "or", "the", "scan", "time", "(", "reference", ")", "per", "scan", "parameter", "(", "parameter", ")", "and", "returns", "an", "array", "with", "a", "normalization", "factor", ".", "This", "normalization", "factor", "has", "the", "length", "of", "the", "number", "of", "different", "parameters", ".", "If", "a", "cluster_file", "is", "specified", "also", "the", "number", "of", "cluster", "per", "event", "are", "used", "to", "create", "the", "normalization", "factor", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L220-L300
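Stripped of the file handling, the returned factor is simply the largest (rate x multiplicity) product divided by each scan step's own product. A toy sketch of that arithmetic with invented numbers:

import numpy as np

n_events = np.array([1000., 500., 250.])        # invented events per scan-parameter step
mean_multiplicity = np.array([1.2, 1.1, 1.0])   # invented mean clusters per event

rate = n_events * mean_multiplicity
print(np.amax(rate) / rate)                     # [1.         2.18181818 4.8       ] -> per-step scale factors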
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_parameter_value_from_file_names
def get_parameter_value_from_file_names(files, parameters=None, unique=False, sort=True): """ Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name in the first dimension and the corresponding parameter value in the second. The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and mapped to the file name that occurred last in the files list. Parameters ---------- files : list of strings parameter : string or list of strings unique : bool sort : bool Returns ------- collections.OrderedDict """ # unique=False logging.debug('Get the parameter: ' + str(parameters) + ' values from the file names of ' + str(len(files)) + ' files') files_dict = collections.OrderedDict() if parameters is None: # special case, no parameter defined return files_dict if isinstance(parameters, basestring): parameters = (parameters, ) search_string = '_'.join(parameters) for _ in parameters: search_string += r'_(-?\d+)' result = {} for one_file in files: parameter_values = re.findall(search_string, one_file) if parameter_values: if isinstance(parameter_values[0], tuple): parameter_values = list(reduce(lambda t1, t2: t1 + t2, parameter_values)) parameter_values = [[int(i), ] for i in parameter_values] # convert string value to list with int files_dict[one_file] = dict(zip(parameters, parameter_values)) if unique: # reduce to the files with different scan parameters for key, value in files_dict.items(): if value not in result.values(): result[key] = value else: result[one_file] = files_dict[one_file] return collections.OrderedDict(sorted(result.iteritems(), key=itemgetter(1)) if sort else files_dict)
python
def get_parameter_value_from_file_names(files, parameters=None, unique=False, sort=True): """ Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name in the first dimension and the corresponding parameter value in the second. The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and mapped to the file name that occurred last in the files list. Parameters ---------- files : list of strings parameter : string or list of strings unique : bool sort : bool Returns ------- collections.OrderedDict """ # unique=False logging.debug('Get the parameter: ' + str(parameters) + ' values from the file names of ' + str(len(files)) + ' files') files_dict = collections.OrderedDict() if parameters is None: # special case, no parameter defined return files_dict if isinstance(parameters, basestring): parameters = (parameters, ) search_string = '_'.join(parameters) for _ in parameters: search_string += r'_(-?\d+)' result = {} for one_file in files: parameter_values = re.findall(search_string, one_file) if parameter_values: if isinstance(parameter_values[0], tuple): parameter_values = list(reduce(lambda t1, t2: t1 + t2, parameter_values)) parameter_values = [[int(i), ] for i in parameter_values] # convert string value to list with int files_dict[one_file] = dict(zip(parameters, parameter_values)) if unique: # reduce to the files with different scan parameters for key, value in files_dict.items(): if value not in result.values(): result[key] = value else: result[one_file] = files_dict[one_file] return collections.OrderedDict(sorted(result.iteritems(), key=itemgetter(1)) if sort else files_dict)
[ "def", "get_parameter_value_from_file_names", "(", "files", ",", "parameters", "=", "None", ",", "unique", "=", "False", ",", "sort", "=", "True", ")", ":", "# unique=False", "logging", ".", "debug", "(", "'Get the parameter: '", "+", "str", "(", "parameters", ")", "+", "' values from the file names of '", "+", "str", "(", "len", "(", "files", ")", ")", "+", "' files'", ")", "files_dict", "=", "collections", ".", "OrderedDict", "(", ")", "if", "parameters", "is", "None", ":", "# special case, no parameter defined", "return", "files_dict", "if", "isinstance", "(", "parameters", ",", "basestring", ")", ":", "parameters", "=", "(", "parameters", ",", ")", "search_string", "=", "'_'", ".", "join", "(", "parameters", ")", "for", "_", "in", "parameters", ":", "search_string", "+=", "r'_(-?\\d+)'", "result", "=", "{", "}", "for", "one_file", "in", "files", ":", "parameter_values", "=", "re", ".", "findall", "(", "search_string", ",", "one_file", ")", "if", "parameter_values", ":", "if", "isinstance", "(", "parameter_values", "[", "0", "]", ",", "tuple", ")", ":", "parameter_values", "=", "list", "(", "reduce", "(", "lambda", "t1", ",", "t2", ":", "t1", "+", "t2", ",", "parameter_values", ")", ")", "parameter_values", "=", "[", "[", "int", "(", "i", ")", ",", "]", "for", "i", "in", "parameter_values", "]", "# convert string value to list with int", "files_dict", "[", "one_file", "]", "=", "dict", "(", "zip", "(", "parameters", ",", "parameter_values", ")", ")", "if", "unique", ":", "# reduce to the files with different scan parameters", "for", "key", ",", "value", "in", "files_dict", ".", "items", "(", ")", ":", "if", "value", "not", "in", "result", ".", "values", "(", ")", ":", "result", "[", "key", "]", "=", "value", "else", ":", "result", "[", "one_file", "]", "=", "files_dict", "[", "one_file", "]", "return", "collections", ".", "OrderedDict", "(", "sorted", "(", "result", ".", "iteritems", "(", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "if", "sort", "else", "files_dict", ")" ]
Takes a list of files, searches for the parameter name in the file name and returns a ordered dict with the file name in the first dimension and the corresponding parameter value in the second. The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and mapped to the file name that occurred last in the files list. Parameters ---------- files : list of strings parameter : string or list of strings unique : bool sort : bool Returns ------- collections.OrderedDict
[ "Takes", "a", "list", "of", "files", "searches", "for", "the", "parameter", "name", "in", "the", "file", "name", "and", "returns", "a", "ordered", "dict", "with", "the", "file", "name", "in", "the", "first", "dimension", "and", "the", "corresponding", "parameter", "value", "in", "the", "second", ".", "The", "file", "names", "can", "be", "sorted", "by", "the", "parameter", "value", "otherwise", "the", "order", "is", "kept", ".", "If", "unique", "is", "true", "every", "parameter", "is", "unique", "and", "mapped", "to", "the", "file", "name", "that", "occurred", "last", "in", "the", "files", "list", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L361-L404
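The value extraction is a regular expression of the form '<parameter>_(-?\d+)' applied to the file name. A small sketch with hypothetical file names (the names are made up; only the pattern construction follows the code above):

import re

parameters = ('GDAC',)
search_string = '_'.join(parameters)
for _ in parameters:
    search_string += r'_(-?\d+)'      # -> 'GDAC_(-?\d+)'

for name in ['scan_threshold_GDAC_30_interpreted.h5', 'scan_threshold_GDAC_-5.h5', 'scan_digital.h5']:
    print(name, re.findall(search_string, name))
# scan_threshold_GDAC_30_interpreted.h5 ['30']
# scan_threshold_GDAC_-5.h5 ['-5']
# scan_digital.h5 []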
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_data_file_names_from_scan_base
def get_data_file_names_from_scan_base(scan_base, filter_str=['_analyzed.h5', '_interpreted.h5', '_cut.h5', '_result.h5', '_hists.h5'], sort_by_time=True, meta_data_v2=True): """ Generate a list of .h5 files which have a similar file name. Parameters ---------- scan_base : list, string List of string or string of the scan base names. The scan_base will be used to search for files containing the string. The .h5 file extension will be added automatically. filter : list, string List of string or string which are used to filter the returned filenames. File names containing filter_str in the file name will not be returned. Use None to disable filter. sort_by_time : bool If True, return file name list sorted from oldest to newest. The time from meta table will be used to sort the files. meta_data_v2 : bool True for new (v2) meta data format, False for the old (v1) format. Returns ------- data_files : list List of file names matching the obove conditions. """ data_files = [] if scan_base is None: return data_files if isinstance(scan_base, basestring): scan_base = [scan_base] for scan_base_str in scan_base: if '.h5' == os.path.splitext(scan_base_str)[1]: data_files.append(scan_base_str) else: data_files.extend(glob.glob(scan_base_str + '*.h5')) if filter_str: if isinstance(filter_str, basestring): filter_str = [filter_str] data_files = filter(lambda data_file: not any([(True if x in data_file else False) for x in filter_str]), data_files) if sort_by_time and len(data_files) > 1: f_list = {} for data_file in data_files: with tb.open_file(data_file, mode="r") as h5_file: try: meta_data = h5_file.root.meta_data except tb.NoSuchNodeError: logging.warning("File %s is missing meta_data" % h5_file.filename) else: try: if meta_data_v2: timestamp = meta_data[0]["timestamp_start"] else: timestamp = meta_data[0]["timestamp"] except IndexError: logging.info("File %s has empty meta_data" % h5_file.filename) else: f_list[data_file] = timestamp data_files = list(sorted(f_list, key=f_list.__getitem__, reverse=False)) return data_files
python
def get_data_file_names_from_scan_base(scan_base, filter_str=['_analyzed.h5', '_interpreted.h5', '_cut.h5', '_result.h5', '_hists.h5'], sort_by_time=True, meta_data_v2=True): """ Generate a list of .h5 files which have a similar file name. Parameters ---------- scan_base : list, string List of string or string of the scan base names. The scan_base will be used to search for files containing the string. The .h5 file extension will be added automatically. filter : list, string List of string or string which are used to filter the returned filenames. File names containing filter_str in the file name will not be returned. Use None to disable filter. sort_by_time : bool If True, return file name list sorted from oldest to newest. The time from meta table will be used to sort the files. meta_data_v2 : bool True for new (v2) meta data format, False for the old (v1) format. Returns ------- data_files : list List of file names matching the obove conditions. """ data_files = [] if scan_base is None: return data_files if isinstance(scan_base, basestring): scan_base = [scan_base] for scan_base_str in scan_base: if '.h5' == os.path.splitext(scan_base_str)[1]: data_files.append(scan_base_str) else: data_files.extend(glob.glob(scan_base_str + '*.h5')) if filter_str: if isinstance(filter_str, basestring): filter_str = [filter_str] data_files = filter(lambda data_file: not any([(True if x in data_file else False) for x in filter_str]), data_files) if sort_by_time and len(data_files) > 1: f_list = {} for data_file in data_files: with tb.open_file(data_file, mode="r") as h5_file: try: meta_data = h5_file.root.meta_data except tb.NoSuchNodeError: logging.warning("File %s is missing meta_data" % h5_file.filename) else: try: if meta_data_v2: timestamp = meta_data[0]["timestamp_start"] else: timestamp = meta_data[0]["timestamp"] except IndexError: logging.info("File %s has empty meta_data" % h5_file.filename) else: f_list[data_file] = timestamp data_files = list(sorted(f_list, key=f_list.__getitem__, reverse=False)) return data_files
[ "def", "get_data_file_names_from_scan_base", "(", "scan_base", ",", "filter_str", "=", "[", "'_analyzed.h5'", ",", "'_interpreted.h5'", ",", "'_cut.h5'", ",", "'_result.h5'", ",", "'_hists.h5'", "]", ",", "sort_by_time", "=", "True", ",", "meta_data_v2", "=", "True", ")", ":", "data_files", "=", "[", "]", "if", "scan_base", "is", "None", ":", "return", "data_files", "if", "isinstance", "(", "scan_base", ",", "basestring", ")", ":", "scan_base", "=", "[", "scan_base", "]", "for", "scan_base_str", "in", "scan_base", ":", "if", "'.h5'", "==", "os", ".", "path", ".", "splitext", "(", "scan_base_str", ")", "[", "1", "]", ":", "data_files", ".", "append", "(", "scan_base_str", ")", "else", ":", "data_files", ".", "extend", "(", "glob", ".", "glob", "(", "scan_base_str", "+", "'*.h5'", ")", ")", "if", "filter_str", ":", "if", "isinstance", "(", "filter_str", ",", "basestring", ")", ":", "filter_str", "=", "[", "filter_str", "]", "data_files", "=", "filter", "(", "lambda", "data_file", ":", "not", "any", "(", "[", "(", "True", "if", "x", "in", "data_file", "else", "False", ")", "for", "x", "in", "filter_str", "]", ")", ",", "data_files", ")", "if", "sort_by_time", "and", "len", "(", "data_files", ")", ">", "1", ":", "f_list", "=", "{", "}", "for", "data_file", "in", "data_files", ":", "with", "tb", ".", "open_file", "(", "data_file", ",", "mode", "=", "\"r\"", ")", "as", "h5_file", ":", "try", ":", "meta_data", "=", "h5_file", ".", "root", ".", "meta_data", "except", "tb", ".", "NoSuchNodeError", ":", "logging", ".", "warning", "(", "\"File %s is missing meta_data\"", "%", "h5_file", ".", "filename", ")", "else", ":", "try", ":", "if", "meta_data_v2", ":", "timestamp", "=", "meta_data", "[", "0", "]", "[", "\"timestamp_start\"", "]", "else", ":", "timestamp", "=", "meta_data", "[", "0", "]", "[", "\"timestamp\"", "]", "except", "IndexError", ":", "logging", ".", "info", "(", "\"File %s has empty meta_data\"", "%", "h5_file", ".", "filename", ")", "else", ":", "f_list", "[", "data_file", "]", "=", "timestamp", "data_files", "=", "list", "(", "sorted", "(", "f_list", ",", "key", "=", "f_list", ".", "__getitem__", ",", "reverse", "=", "False", ")", ")", "return", "data_files" ]
Generate a list of .h5 files which have a similar file name. Parameters ---------- scan_base : list, string List of string or string of the scan base names. The scan_base will be used to search for files containing the string. The .h5 file extension will be added automatically. filter : list, string List of string or string which are used to filter the returned filenames. File names containing filter_str in the file name will not be returned. Use None to disable filter. sort_by_time : bool If True, return file name list sorted from oldest to newest. The time from meta table will be used to sort the files. meta_data_v2 : bool True for new (v2) meta data format, False for the old (v1) format. Returns ------- data_files : list List of file names matching the obove conditions.
[ "Generate", "a", "list", "of", ".", "h5", "files", "which", "have", "a", "similar", "file", "name", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L407-L462
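Without the HDF5 timestamp sorting, the selection is just glob plus a substring filter. A sketch of that filtering step with hypothetical file names (no files are opened here):

filter_str = ['_analyzed.h5', '_interpreted.h5']
candidates = ['scan_1.h5', 'scan_1_interpreted.h5', 'scan_2.h5', 'scan_2_analyzed.h5']   # stand-in for glob.glob(scan_base + '*.h5')

data_files = [f for f in candidates if not any(x in f for x in filter_str)]
print(data_files)   # ['scan_1.h5', 'scan_2.h5']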
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_parameter_from_files
def get_parameter_from_files(files, parameters=None, unique=False, sort=True): ''' Takes a list of files, searches for the parameter name in the file name and in the file. Returns a ordered dict with the file name in the first dimension and the corresponding parameter values in the second. If a scan parameter appears in the file name and in the file the first parameter setting has to be in the file name, otherwise a warning is shown. The file names can be sorted by the first parameter value of each file. Parameters ---------- files : string, list of strings parameters : string, list of strings unique : boolean If set only one file per scan parameter value is used. sort : boolean Returns ------- collections.OrderedDict ''' logging.debug('Get the parameter ' + str(parameters) + ' values from ' + str(len(files)) + ' files') files_dict = collections.OrderedDict() if isinstance(files, basestring): files = (files, ) if isinstance(parameters, basestring): parameters = (parameters, ) parameter_values_from_file_names_dict = get_parameter_value_from_file_names(files, parameters, unique=unique, sort=sort) # get the parameter from the file name for file_name in files: with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file scan_parameter_values = collections.OrderedDict() try: scan_parameters = in_file_h5.root.scan_parameters[:] # get the scan parameters from the scan parameter table if parameters is None: parameters = get_scan_parameter_names(scan_parameters) for parameter in parameters: try: scan_parameter_values[parameter] = np.unique(scan_parameters[parameter]).tolist() # different scan parameter values used except ValueError: # the scan parameter does not exists pass except tb.NoSuchNodeError: # scan parameter table does not exist try: scan_parameters = get_scan_parameter(in_file_h5.root.meta_data[:]) # get the scan parameters from the meta data if scan_parameters: try: scan_parameter_values = np.unique(scan_parameters[parameters]).tolist() # different scan parameter values used except ValueError: # the scan parameter does not exists pass except tb.NoSuchNodeError: # meta data table does not exist pass if not scan_parameter_values: # if no scan parameter values could be set from file take the parameter found in the file name try: scan_parameter_values = parameter_values_from_file_names_dict[file_name] except KeyError: # no scan parameter found at all, neither in the file name nor in the file scan_parameter_values = None else: # use the parameter given in the file and cross check if it matches the file name parameter if these is given try: for key, value in scan_parameter_values.items(): if value and value[0] != parameter_values_from_file_names_dict[file_name][key][0]: # parameter value exists: check if the first value is the file name value logging.warning('Parameter values in the file name and in the file differ. 
Take ' + str(key) + ' parameters ' + str(value) + ' found in %s.', file_name) except KeyError: # parameter does not exists in the file name pass except IndexError: raise IncompleteInputError('Something wrong check!') if unique and scan_parameter_values is not None: existing = False for parameter in scan_parameter_values: # loop to determine if any value of any scan parameter exists already all_par_values = [values[parameter] for values in files_dict.values()] if any(x in [scan_parameter_values[parameter]] for x in all_par_values): existing = True break if not existing: files_dict[file_name] = scan_parameter_values else: logging.warning('Scan parameter value(s) from %s exists already, do not add to result', file_name) else: files_dict[file_name] = scan_parameter_values return collections.OrderedDict(sorted(files_dict.iteritems(), key=itemgetter(1)) if sort else files_dict)
python
def get_parameter_from_files(files, parameters=None, unique=False, sort=True): ''' Takes a list of files, searches for the parameter name in the file name and in the file. Returns a ordered dict with the file name in the first dimension and the corresponding parameter values in the second. If a scan parameter appears in the file name and in the file the first parameter setting has to be in the file name, otherwise a warning is shown. The file names can be sorted by the first parameter value of each file. Parameters ---------- files : string, list of strings parameters : string, list of strings unique : boolean If set only one file per scan parameter value is used. sort : boolean Returns ------- collections.OrderedDict ''' logging.debug('Get the parameter ' + str(parameters) + ' values from ' + str(len(files)) + ' files') files_dict = collections.OrderedDict() if isinstance(files, basestring): files = (files, ) if isinstance(parameters, basestring): parameters = (parameters, ) parameter_values_from_file_names_dict = get_parameter_value_from_file_names(files, parameters, unique=unique, sort=sort) # get the parameter from the file name for file_name in files: with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file scan_parameter_values = collections.OrderedDict() try: scan_parameters = in_file_h5.root.scan_parameters[:] # get the scan parameters from the scan parameter table if parameters is None: parameters = get_scan_parameter_names(scan_parameters) for parameter in parameters: try: scan_parameter_values[parameter] = np.unique(scan_parameters[parameter]).tolist() # different scan parameter values used except ValueError: # the scan parameter does not exists pass except tb.NoSuchNodeError: # scan parameter table does not exist try: scan_parameters = get_scan_parameter(in_file_h5.root.meta_data[:]) # get the scan parameters from the meta data if scan_parameters: try: scan_parameter_values = np.unique(scan_parameters[parameters]).tolist() # different scan parameter values used except ValueError: # the scan parameter does not exists pass except tb.NoSuchNodeError: # meta data table does not exist pass if not scan_parameter_values: # if no scan parameter values could be set from file take the parameter found in the file name try: scan_parameter_values = parameter_values_from_file_names_dict[file_name] except KeyError: # no scan parameter found at all, neither in the file name nor in the file scan_parameter_values = None else: # use the parameter given in the file and cross check if it matches the file name parameter if these is given try: for key, value in scan_parameter_values.items(): if value and value[0] != parameter_values_from_file_names_dict[file_name][key][0]: # parameter value exists: check if the first value is the file name value logging.warning('Parameter values in the file name and in the file differ. 
Take ' + str(key) + ' parameters ' + str(value) + ' found in %s.', file_name) except KeyError: # parameter does not exists in the file name pass except IndexError: raise IncompleteInputError('Something wrong check!') if unique and scan_parameter_values is not None: existing = False for parameter in scan_parameter_values: # loop to determine if any value of any scan parameter exists already all_par_values = [values[parameter] for values in files_dict.values()] if any(x in [scan_parameter_values[parameter]] for x in all_par_values): existing = True break if not existing: files_dict[file_name] = scan_parameter_values else: logging.warning('Scan parameter value(s) from %s exists already, do not add to result', file_name) else: files_dict[file_name] = scan_parameter_values return collections.OrderedDict(sorted(files_dict.iteritems(), key=itemgetter(1)) if sort else files_dict)
[ "def", "get_parameter_from_files", "(", "files", ",", "parameters", "=", "None", ",", "unique", "=", "False", ",", "sort", "=", "True", ")", ":", "logging", ".", "debug", "(", "'Get the parameter '", "+", "str", "(", "parameters", ")", "+", "' values from '", "+", "str", "(", "len", "(", "files", ")", ")", "+", "' files'", ")", "files_dict", "=", "collections", ".", "OrderedDict", "(", ")", "if", "isinstance", "(", "files", ",", "basestring", ")", ":", "files", "=", "(", "files", ",", ")", "if", "isinstance", "(", "parameters", ",", "basestring", ")", ":", "parameters", "=", "(", "parameters", ",", ")", "parameter_values_from_file_names_dict", "=", "get_parameter_value_from_file_names", "(", "files", ",", "parameters", ",", "unique", "=", "unique", ",", "sort", "=", "sort", ")", "# get the parameter from the file name", "for", "file_name", "in", "files", ":", "with", "tb", ".", "open_file", "(", "file_name", ",", "mode", "=", "\"r\"", ")", "as", "in_file_h5", ":", "# open the actual file", "scan_parameter_values", "=", "collections", ".", "OrderedDict", "(", ")", "try", ":", "scan_parameters", "=", "in_file_h5", ".", "root", ".", "scan_parameters", "[", ":", "]", "# get the scan parameters from the scan parameter table", "if", "parameters", "is", "None", ":", "parameters", "=", "get_scan_parameter_names", "(", "scan_parameters", ")", "for", "parameter", "in", "parameters", ":", "try", ":", "scan_parameter_values", "[", "parameter", "]", "=", "np", ".", "unique", "(", "scan_parameters", "[", "parameter", "]", ")", ".", "tolist", "(", ")", "# different scan parameter values used", "except", "ValueError", ":", "# the scan parameter does not exists", "pass", "except", "tb", ".", "NoSuchNodeError", ":", "# scan parameter table does not exist", "try", ":", "scan_parameters", "=", "get_scan_parameter", "(", "in_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", ")", "# get the scan parameters from the meta data", "if", "scan_parameters", ":", "try", ":", "scan_parameter_values", "=", "np", ".", "unique", "(", "scan_parameters", "[", "parameters", "]", ")", ".", "tolist", "(", ")", "# different scan parameter values used", "except", "ValueError", ":", "# the scan parameter does not exists", "pass", "except", "tb", ".", "NoSuchNodeError", ":", "# meta data table does not exist", "pass", "if", "not", "scan_parameter_values", ":", "# if no scan parameter values could be set from file take the parameter found in the file name", "try", ":", "scan_parameter_values", "=", "parameter_values_from_file_names_dict", "[", "file_name", "]", "except", "KeyError", ":", "# no scan parameter found at all, neither in the file name nor in the file", "scan_parameter_values", "=", "None", "else", ":", "# use the parameter given in the file and cross check if it matches the file name parameter if these is given", "try", ":", "for", "key", ",", "value", "in", "scan_parameter_values", ".", "items", "(", ")", ":", "if", "value", "and", "value", "[", "0", "]", "!=", "parameter_values_from_file_names_dict", "[", "file_name", "]", "[", "key", "]", "[", "0", "]", ":", "# parameter value exists: check if the first value is the file name value", "logging", ".", "warning", "(", "'Parameter values in the file name and in the file differ. 
Take '", "+", "str", "(", "key", ")", "+", "' parameters '", "+", "str", "(", "value", ")", "+", "' found in %s.'", ",", "file_name", ")", "except", "KeyError", ":", "# parameter does not exists in the file name", "pass", "except", "IndexError", ":", "raise", "IncompleteInputError", "(", "'Something wrong check!'", ")", "if", "unique", "and", "scan_parameter_values", "is", "not", "None", ":", "existing", "=", "False", "for", "parameter", "in", "scan_parameter_values", ":", "# loop to determine if any value of any scan parameter exists already", "all_par_values", "=", "[", "values", "[", "parameter", "]", "for", "values", "in", "files_dict", ".", "values", "(", ")", "]", "if", "any", "(", "x", "in", "[", "scan_parameter_values", "[", "parameter", "]", "]", "for", "x", "in", "all_par_values", ")", ":", "existing", "=", "True", "break", "if", "not", "existing", ":", "files_dict", "[", "file_name", "]", "=", "scan_parameter_values", "else", ":", "logging", ".", "warning", "(", "'Scan parameter value(s) from %s exists already, do not add to result'", ",", "file_name", ")", "else", ":", "files_dict", "[", "file_name", "]", "=", "scan_parameter_values", "return", "collections", ".", "OrderedDict", "(", "sorted", "(", "files_dict", ".", "iteritems", "(", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "if", "sort", "else", "files_dict", ")" ]
Takes a list of files, searches for the parameter name in the file name and in the file. Returns a ordered dict with the file name in the first dimension and the corresponding parameter values in the second. If a scan parameter appears in the file name and in the file the first parameter setting has to be in the file name, otherwise a warning is shown. The file names can be sorted by the first parameter value of each file. Parameters ---------- files : string, list of strings parameters : string, list of strings unique : boolean If set only one file per scan parameter value is used. sort : boolean Returns ------- collections.OrderedDict
[ "Takes", "a", "list", "of", "files", "searches", "for", "the", "parameter", "name", "in", "the", "file", "name", "and", "in", "the", "file", ".", "Returns", "a", "ordered", "dict", "with", "the", "file", "name", "in", "the", "first", "dimension", "and", "the", "corresponding", "parameter", "values", "in", "the", "second", ".", "If", "a", "scan", "parameter", "appears", "in", "the", "file", "name", "and", "in", "the", "file", "the", "first", "parameter", "setting", "has", "to", "be", "in", "the", "file", "name", "otherwise", "a", "warning", "is", "shown", ".", "The", "file", "names", "can", "be", "sorted", "by", "the", "first", "parameter", "value", "of", "each", "file", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L480-L555
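A minimal usage sketch for get_parameter_from_files from the record above. The file names, the scan parameter name 'PlsrDAC' and the file-name convention are invented for illustration; real input files must exist and carry the parameter either in a scan_parameters/meta_data node or in the file name. pyBAR targets Python 2, so the sketch keeps that style.

from pybar.analysis.analysis_utils import get_parameter_from_files

# hypothetical interpreted scan files, assumed to be named ..._PlsrDAC_<value>.h5
files = ['scan_threshold_PlsrDAC_100.h5', 'scan_threshold_PlsrDAC_200.h5']
files_dict = get_parameter_from_files(files, parameters='PlsrDAC', unique=True, sort=True)
for file_name, parameter_values in files_dict.items():
    print(file_name, parameter_values)  # e.g. OrderedDict([('PlsrDAC', [100])])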
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
check_parameter_similarity
def check_parameter_similarity(files_dict): """ Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input. """ try: parameter_names = files_dict.itervalues().next().keys() # get the parameter names of the first file, to check if these are the same in the other files except AttributeError: # if there is no parameter at all if any(i is not None for i in files_dict.itervalues()): # check if there is also no parameter for the other files return False else: return True if any(parameter_names != i.keys() for i in files_dict.itervalues()): return False return True
python
def check_parameter_similarity(files_dict): """ Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input. """ try: parameter_names = files_dict.itervalues().next().keys() # get the parameter names of the first file, to check if these are the same in the other files except AttributeError: # if there is no parameter at all if any(i is not None for i in files_dict.itervalues()): # check if there is also no parameter for the other files return False else: return True if any(parameter_names != i.keys() for i in files_dict.itervalues()): return False return True
[ "def", "check_parameter_similarity", "(", "files_dict", ")", ":", "try", ":", "parameter_names", "=", "files_dict", ".", "itervalues", "(", ")", ".", "next", "(", ")", ".", "keys", "(", ")", "# get the parameter names of the first file, to check if these are the same in the other files", "except", "AttributeError", ":", "# if there is no parameter at all", "if", "any", "(", "i", "is", "not", "None", "for", "i", "in", "files_dict", ".", "itervalues", "(", ")", ")", ":", "# check if there is also no parameter for the other files", "return", "False", "else", ":", "return", "True", "if", "any", "(", "parameter_names", "!=", "i", ".", "keys", "(", ")", "for", "i", "in", "files_dict", ".", "itervalues", "(", ")", ")", ":", "return", "False", "return", "True" ]
Checks if the parameter names of all files are similar. Takes the dictionary from get_parameter_from_files output as input.
[ "Checks", "if", "the", "parameter", "names", "of", "all", "files", "are", "similar", ".", "Takes", "the", "dictionary", "from", "get_parameter_from_files", "output", "as", "input", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L558-L572
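A short sketch showing how check_parameter_similarity can guard a combination step. Here files_dict stands for the OrderedDict returned by get_parameter_from_files, so this is an assumed precondition rather than a complete script.

from pybar.analysis.analysis_utils import check_parameter_similarity

# files_dict: OrderedDict of file name -> scan parameter values (see get_parameter_from_files above)
if not check_parameter_similarity(files_dict):
    raise RuntimeError('Scan parameter names of the given files differ, refusing to combine them')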
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
combine_meta_data
def combine_meta_data(files_dict, meta_data_v2=True): """ Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array. Parameters ---------- meta_data_v2 : bool True for new (v2) meta data format, False for the old (v1) format. """ if len(files_dict) > 10: logging.info("Combine the meta data from %d files", len(files_dict)) # determine total length needed for the new combined array, thats the fastest way to combine arrays total_length = 0 # the total length of the new table for file_name in files_dict.iterkeys(): with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file total_length += in_file_h5.root.meta_data.shape[0] if meta_data_v2: meta_data_combined = np.empty((total_length, ), dtype=[ ('index_start', np.uint32), ('index_stop', np.uint32), ('data_length', np.uint32), ('timestamp_start', np.float64), ('timestamp_stop', np.float64), ('error', np.uint32)]) else: meta_data_combined = np.empty((total_length, ), dtype=[ ('start_index', np.uint32), ('stop_index', np.uint32), ('length', np.uint32), ('timestamp', np.float64), ('error', np.uint32)]) if len(files_dict) > 10: progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_length, term_width=80) progress_bar.start() index = 0 # fill actual result array for file_name in files_dict.iterkeys(): with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file array_length = in_file_h5.root.meta_data.shape[0] meta_data_combined[index:index + array_length] = in_file_h5.root.meta_data[:] index += array_length if len(files_dict) > 10: progress_bar.update(index) if len(files_dict) > 10: progress_bar.finish() return meta_data_combined
python
def combine_meta_data(files_dict, meta_data_v2=True): """ Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array. Parameters ---------- meta_data_v2 : bool True for new (v2) meta data format, False for the old (v1) format. """ if len(files_dict) > 10: logging.info("Combine the meta data from %d files", len(files_dict)) # determine total length needed for the new combined array, thats the fastest way to combine arrays total_length = 0 # the total length of the new table for file_name in files_dict.iterkeys(): with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file total_length += in_file_h5.root.meta_data.shape[0] if meta_data_v2: meta_data_combined = np.empty((total_length, ), dtype=[ ('index_start', np.uint32), ('index_stop', np.uint32), ('data_length', np.uint32), ('timestamp_start', np.float64), ('timestamp_stop', np.float64), ('error', np.uint32)]) else: meta_data_combined = np.empty((total_length, ), dtype=[ ('start_index', np.uint32), ('stop_index', np.uint32), ('length', np.uint32), ('timestamp', np.float64), ('error', np.uint32)]) if len(files_dict) > 10: progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_length, term_width=80) progress_bar.start() index = 0 # fill actual result array for file_name in files_dict.iterkeys(): with tb.open_file(file_name, mode="r") as in_file_h5: # open the actual file array_length = in_file_h5.root.meta_data.shape[0] meta_data_combined[index:index + array_length] = in_file_h5.root.meta_data[:] index += array_length if len(files_dict) > 10: progress_bar.update(index) if len(files_dict) > 10: progress_bar.finish() return meta_data_combined
[ "def", "combine_meta_data", "(", "files_dict", ",", "meta_data_v2", "=", "True", ")", ":", "if", "len", "(", "files_dict", ")", ">", "10", ":", "logging", ".", "info", "(", "\"Combine the meta data from %d files\"", ",", "len", "(", "files_dict", ")", ")", "# determine total length needed for the new combined array, thats the fastest way to combine arrays", "total_length", "=", "0", "# the total length of the new table", "for", "file_name", "in", "files_dict", ".", "iterkeys", "(", ")", ":", "with", "tb", ".", "open_file", "(", "file_name", ",", "mode", "=", "\"r\"", ")", "as", "in_file_h5", ":", "# open the actual file", "total_length", "+=", "in_file_h5", ".", "root", ".", "meta_data", ".", "shape", "[", "0", "]", "if", "meta_data_v2", ":", "meta_data_combined", "=", "np", ".", "empty", "(", "(", "total_length", ",", ")", ",", "dtype", "=", "[", "(", "'index_start'", ",", "np", ".", "uint32", ")", ",", "(", "'index_stop'", ",", "np", ".", "uint32", ")", ",", "(", "'data_length'", ",", "np", ".", "uint32", ")", ",", "(", "'timestamp_start'", ",", "np", ".", "float64", ")", ",", "(", "'timestamp_stop'", ",", "np", ".", "float64", ")", ",", "(", "'error'", ",", "np", ".", "uint32", ")", "]", ")", "else", ":", "meta_data_combined", "=", "np", ".", "empty", "(", "(", "total_length", ",", ")", ",", "dtype", "=", "[", "(", "'start_index'", ",", "np", ".", "uint32", ")", ",", "(", "'stop_index'", ",", "np", ".", "uint32", ")", ",", "(", "'length'", ",", "np", ".", "uint32", ")", ",", "(", "'timestamp'", ",", "np", ".", "float64", ")", ",", "(", "'error'", ",", "np", ".", "uint32", ")", "]", ")", "if", "len", "(", "files_dict", ")", ">", "10", ":", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", ".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "total_length", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "index", "=", "0", "# fill actual result array", "for", "file_name", "in", "files_dict", ".", "iterkeys", "(", ")", ":", "with", "tb", ".", "open_file", "(", "file_name", ",", "mode", "=", "\"r\"", ")", "as", "in_file_h5", ":", "# open the actual file", "array_length", "=", "in_file_h5", ".", "root", ".", "meta_data", ".", "shape", "[", "0", "]", "meta_data_combined", "[", "index", ":", "index", "+", "array_length", "]", "=", "in_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "index", "+=", "array_length", "if", "len", "(", "files_dict", ")", ">", "10", ":", "progress_bar", ".", "update", "(", "index", ")", "if", "len", "(", "files_dict", ")", ">", "10", ":", "progress_bar", ".", "finish", "(", ")", "return", "meta_data_combined" ]
Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array. Parameters ---------- meta_data_v2 : bool True for new (v2) meta data format, False for the old (v1) format.
[ "Takes", "the", "dict", "of", "hdf5", "files", "and", "combines", "their", "meta", "data", "tables", "into", "one", "new", "numpy", "record", "array", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L575-L624
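A usage sketch for combine_meta_data, assuming files_dict comes from get_parameter_from_files and that every file was recorded with the new (v2) meta data format; the printed field names follow the dtype defined in the function itself.

from pybar.analysis.analysis_utils import combine_meta_data

meta_data = combine_meta_data(files_dict, meta_data_v2=True)
print(meta_data.dtype.names)  # ('index_start', 'index_stop', 'data_length', 'timestamp_start', 'timestamp_stop', 'error')
print('%d meta data rows combined' % meta_data.shape[0])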
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
smooth_differentiation
def smooth_differentiation(x, y, weigths=None, order=5, smoothness=3, derivation=1): '''Returns the dy/dx(x) with the fit and differentiation of a spline curve Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like ''' if (len(x) != len(y)): raise ValueError("x, y must have the same length") f = splrep(x, y, w=weigths, k=order, s=smoothness) # spline function return splev(x, f, der=derivation)
python
def smooth_differentiation(x, y, weigths=None, order=5, smoothness=3, derivation=1): '''Returns the dy/dx(x) with the fit and differentiation of a spline curve Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like ''' if (len(x) != len(y)): raise ValueError("x, y must have the same length") f = splrep(x, y, w=weigths, k=order, s=smoothness) # spline function return splev(x, f, der=derivation)
[ "def", "smooth_differentiation", "(", "x", ",", "y", ",", "weigths", "=", "None", ",", "order", "=", "5", ",", "smoothness", "=", "3", ",", "derivation", "=", "1", ")", ":", "if", "(", "len", "(", "x", ")", "!=", "len", "(", "y", ")", ")", ":", "raise", "ValueError", "(", "\"x, y must have the same length\"", ")", "f", "=", "splrep", "(", "x", ",", "y", ",", "w", "=", "weigths", ",", "k", "=", "order", ",", "s", "=", "smoothness", ")", "# spline function", "return", "splev", "(", "x", ",", "f", ",", "der", "=", "derivation", ")" ]
Returns the dy/dx(x) with the fit and differentiation of a spline curve Parameters ---------- x : array like y : array like Returns ------- dy/dx : array like
[ "Returns", "the", "dy", "/", "dx", "(", "x", ")", "with", "the", "fit", "and", "differentiation", "of", "a", "spline", "curve" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L627-L642
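A self-contained numeric sketch for smooth_differentiation on toy data: the spline derivative of a slightly noisy sine should come out close to a cosine. The spline order and smoothness are example values, not recommendations from the source.

import numpy as np
from pybar.analysis.analysis_utils import smooth_differentiation

x = np.linspace(0., 2. * np.pi, 100)
y = np.sin(x) + np.random.normal(0., 0.01, x.shape[0])  # toy data with a little noise
dy_dx = smooth_differentiation(x, y, order=3, smoothness=0.01, derivation=1)
# dy_dx should follow np.cos(x) apart from small deviations near the edges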
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
reduce_sorted_to_intersect
def reduce_sorted_to_intersect(ar1, ar2): """ Takes two sorted arrays and return the intersection ar1 in ar2, ar2 in ar1. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like Input array. Returns ------- ar1, ar1 : ndarray, ndarray The intersection values. """ # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # get min max values of the arrays ar1_biggest_value = ar1[-1] ar1_smallest_value = ar1[0] ar2_biggest_value = ar2[-1] ar2_smallest_value = ar2[0] if ar1_biggest_value < ar2_smallest_value or ar1_smallest_value > ar2_biggest_value: # special case, no intersection at all return ar1[0:0], ar2[0:0] # get min/max indices with values that are also in the other array min_index_ar1 = np.argmin(ar1 < ar2_smallest_value) max_index_ar1 = np.argmax(ar1 > ar2_biggest_value) min_index_ar2 = np.argmin(ar2 < ar1_smallest_value) max_index_ar2 = np.argmax(ar2 > ar1_biggest_value) if min_index_ar1 < 0: min_index_ar1 = 0 if min_index_ar2 < 0: min_index_ar2 = 0 if max_index_ar1 == 0 or max_index_ar1 > ar1.shape[0]: max_index_ar1 = ar1.shape[0] if max_index_ar2 == 0 or max_index_ar2 > ar2.shape[0]: max_index_ar2 = ar2.shape[0] # reduce the data return ar1[min_index_ar1:max_index_ar1], ar2[min_index_ar2:max_index_ar2]
python
def reduce_sorted_to_intersect(ar1, ar2): """ Takes two sorted arrays and return the intersection ar1 in ar2, ar2 in ar1. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like Input array. Returns ------- ar1, ar1 : ndarray, ndarray The intersection values. """ # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # get min max values of the arrays ar1_biggest_value = ar1[-1] ar1_smallest_value = ar1[0] ar2_biggest_value = ar2[-1] ar2_smallest_value = ar2[0] if ar1_biggest_value < ar2_smallest_value or ar1_smallest_value > ar2_biggest_value: # special case, no intersection at all return ar1[0:0], ar2[0:0] # get min/max indices with values that are also in the other array min_index_ar1 = np.argmin(ar1 < ar2_smallest_value) max_index_ar1 = np.argmax(ar1 > ar2_biggest_value) min_index_ar2 = np.argmin(ar2 < ar1_smallest_value) max_index_ar2 = np.argmax(ar2 > ar1_biggest_value) if min_index_ar1 < 0: min_index_ar1 = 0 if min_index_ar2 < 0: min_index_ar2 = 0 if max_index_ar1 == 0 or max_index_ar1 > ar1.shape[0]: max_index_ar1 = ar1.shape[0] if max_index_ar2 == 0 or max_index_ar2 > ar2.shape[0]: max_index_ar2 = ar2.shape[0] # reduce the data return ar1[min_index_ar1:max_index_ar1], ar2[min_index_ar2:max_index_ar2]
[ "def", "reduce_sorted_to_intersect", "(", "ar1", ",", "ar2", ")", ":", "# Ravel both arrays, behavior for the first array could be different", "ar1", "=", "np", ".", "asarray", "(", "ar1", ")", ".", "ravel", "(", ")", "ar2", "=", "np", ".", "asarray", "(", "ar2", ")", ".", "ravel", "(", ")", "# get min max values of the arrays", "ar1_biggest_value", "=", "ar1", "[", "-", "1", "]", "ar1_smallest_value", "=", "ar1", "[", "0", "]", "ar2_biggest_value", "=", "ar2", "[", "-", "1", "]", "ar2_smallest_value", "=", "ar2", "[", "0", "]", "if", "ar1_biggest_value", "<", "ar2_smallest_value", "or", "ar1_smallest_value", ">", "ar2_biggest_value", ":", "# special case, no intersection at all", "return", "ar1", "[", "0", ":", "0", "]", ",", "ar2", "[", "0", ":", "0", "]", "# get min/max indices with values that are also in the other array", "min_index_ar1", "=", "np", ".", "argmin", "(", "ar1", "<", "ar2_smallest_value", ")", "max_index_ar1", "=", "np", ".", "argmax", "(", "ar1", ">", "ar2_biggest_value", ")", "min_index_ar2", "=", "np", ".", "argmin", "(", "ar2", "<", "ar1_smallest_value", ")", "max_index_ar2", "=", "np", ".", "argmax", "(", "ar2", ">", "ar1_biggest_value", ")", "if", "min_index_ar1", "<", "0", ":", "min_index_ar1", "=", "0", "if", "min_index_ar2", "<", "0", ":", "min_index_ar2", "=", "0", "if", "max_index_ar1", "==", "0", "or", "max_index_ar1", ">", "ar1", ".", "shape", "[", "0", "]", ":", "max_index_ar1", "=", "ar1", ".", "shape", "[", "0", "]", "if", "max_index_ar2", "==", "0", "or", "max_index_ar2", ">", "ar2", ".", "shape", "[", "0", "]", ":", "max_index_ar2", "=", "ar2", ".", "shape", "[", "0", "]", "# reduce the data", "return", "ar1", "[", "min_index_ar1", ":", "max_index_ar1", "]", ",", "ar2", "[", "min_index_ar2", ":", "max_index_ar2", "]" ]
Takes two sorted arrays and return the intersection ar1 in ar2, ar2 in ar1. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like Input array. Returns ------- ar1, ar1 : ndarray, ndarray The intersection values.
[ "Takes", "two", "sorted", "arrays", "and", "return", "the", "intersection", "ar1", "in", "ar2", "ar2", "in", "ar1", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L645-L691
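A small sketch for reduce_sorted_to_intersect with two sorted toy arrays; the expected output follows from the index arithmetic in the record above.

import numpy as np
from pybar.analysis.analysis_utils import reduce_sorted_to_intersect

ar1 = np.array([0, 1, 2, 3, 4, 5])
ar2 = np.array([3, 4, 5, 6, 7])
ar1_reduced, ar2_reduced = reduce_sorted_to_intersect(ar1, ar2)
print(ar1_reduced)  # [3 4 5]
print(ar2_reduced)  # [3 4 5]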
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_not_unique_values
def get_not_unique_values(array): '''Returns the values that appear at least twice in array. Parameters ---------- array : array like Returns ------- numpy.array ''' s = np.sort(array, axis=None) s = s[s[1:] == s[:-1]] return np.unique(s)
python
def get_not_unique_values(array): '''Returns the values that appear at least twice in array. Parameters ---------- array : array like Returns ------- numpy.array ''' s = np.sort(array, axis=None) s = s[s[1:] == s[:-1]] return np.unique(s)
[ "def", "get_not_unique_values", "(", "array", ")", ":", "s", "=", "np", ".", "sort", "(", "array", ",", "axis", "=", "None", ")", "s", "=", "s", "[", "s", "[", "1", ":", "]", "==", "s", "[", ":", "-", "1", "]", "]", "return", "np", ".", "unique", "(", "s", ")" ]
Returns the values that appear at least twice in array. Parameters ---------- array : array like Returns ------- numpy.array
[ "Returns", "the", "values", "that", "appear", "at", "least", "twice", "in", "array", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L694-L707
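A one-call sketch for get_not_unique_values. Note that the boolean-mask trick in the implementation relies on the older NumPy behaviour this Python 2 code base was written against; recent NumPy versions reject a mask that is shorter than the indexed array.

import numpy as np
from pybar.analysis.analysis_utils import get_not_unique_values

print(get_not_unique_values(np.array([1, 2, 2, 3, 3, 3, 4])))  # [2 3]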
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_meta_data_index_at_scan_parameter
def get_meta_data_index_at_scan_parameter(meta_data_array, scan_parameter_name): '''Takes the analyzed meta_data table and returns the indices where the scan parameter changes Parameters ---------- meta_data_array : numpy.recordarray scan_parameter_name : string Returns ------- numpy.ndarray: first dimension: scan parameter value second dimension: index where scan parameter value was used first ''' scan_parameter_values = meta_data_array[scan_parameter_name] diff = np.concatenate(([1], np.diff(scan_parameter_values))) idx = np.concatenate((np.where(diff)[0], [len(scan_parameter_values)])) index = np.empty(len(idx) - 1, dtype={'names': [scan_parameter_name, 'index'], 'formats': ['u4', 'u4']}) index[scan_parameter_name] = scan_parameter_values[idx[:-1]] index['index'] = idx[:-1] return index
python
def get_meta_data_index_at_scan_parameter(meta_data_array, scan_parameter_name): '''Takes the analyzed meta_data table and returns the indices where the scan parameter changes Parameters ---------- meta_data_array : numpy.recordarray scan_parameter_name : string Returns ------- numpy.ndarray: first dimension: scan parameter value second dimension: index where scan parameter value was used first ''' scan_parameter_values = meta_data_array[scan_parameter_name] diff = np.concatenate(([1], np.diff(scan_parameter_values))) idx = np.concatenate((np.where(diff)[0], [len(scan_parameter_values)])) index = np.empty(len(idx) - 1, dtype={'names': [scan_parameter_name, 'index'], 'formats': ['u4', 'u4']}) index[scan_parameter_name] = scan_parameter_values[idx[:-1]] index['index'] = idx[:-1] return index
[ "def", "get_meta_data_index_at_scan_parameter", "(", "meta_data_array", ",", "scan_parameter_name", ")", ":", "scan_parameter_values", "=", "meta_data_array", "[", "scan_parameter_name", "]", "diff", "=", "np", ".", "concatenate", "(", "(", "[", "1", "]", ",", "np", ".", "diff", "(", "scan_parameter_values", ")", ")", ")", "idx", "=", "np", ".", "concatenate", "(", "(", "np", ".", "where", "(", "diff", ")", "[", "0", "]", ",", "[", "len", "(", "scan_parameter_values", ")", "]", ")", ")", "index", "=", "np", ".", "empty", "(", "len", "(", "idx", ")", "-", "1", ",", "dtype", "=", "{", "'names'", ":", "[", "scan_parameter_name", ",", "'index'", "]", ",", "'formats'", ":", "[", "'u4'", ",", "'u4'", "]", "}", ")", "index", "[", "scan_parameter_name", "]", "=", "scan_parameter_values", "[", "idx", "[", ":", "-", "1", "]", "]", "index", "[", "'index'", "]", "=", "idx", "[", ":", "-", "1", "]", "return", "index" ]
Takes the analyzed meta_data table and returns the indices where the scan parameter changes Parameters ---------- meta_data_array : numpy.recordarray scan_parameter_name : string Returns ------- numpy.ndarray: first dimension: scan parameter value second dimension: index where scan parameter value was used first
[ "Takes", "the", "analyzed", "meta_data", "table", "and", "returns", "the", "indices", "where", "the", "scan", "parameter", "changes" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L710-L730
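A sketch for get_meta_data_index_at_scan_parameter with a hand-built meta data array; the column names and values are invented, 'PlsrDAC' merely being a typical pyBAR scan parameter name.

import numpy as np
from pybar.analysis.analysis_utils import get_meta_data_index_at_scan_parameter

meta_data_array = np.array([(0, 10), (1, 10), (2, 20), (3, 20)],
                           dtype=[('event_number', np.int64), ('PlsrDAC', np.uint32)])
index = get_meta_data_index_at_scan_parameter(meta_data_array, 'PlsrDAC')
print(index['PlsrDAC'])  # [10 20] -> scan parameter value at each change
print(index['index'])    # [0 2]   -> first meta data row where that value occurs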
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
select_hits
def select_hits(hits_array, condition=None): '''Selects the hits with condition. E.g.: condition = 'rel_BCID == 7 & event_number < 1000' Parameters ---------- hits_array : numpy.array condition : string A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken. Returns ------- numpy.array hit array with the selceted hits ''' if condition is None: return hits_array for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits_array[\'' + variable + '\']') return hits_array[ne.evaluate(condition)]
python
def select_hits(hits_array, condition=None): '''Selects the hits with condition. E.g.: condition = 'rel_BCID == 7 & event_number < 1000' Parameters ---------- hits_array : numpy.array condition : string A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken. Returns ------- numpy.array hit array with the selceted hits ''' if condition is None: return hits_array for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits_array[\'' + variable + '\']') return hits_array[ne.evaluate(condition)]
[ "def", "select_hits", "(", "hits_array", ",", "condition", "=", "None", ")", ":", "if", "condition", "is", "None", ":", "return", "hits_array", "for", "variable", "in", "set", "(", "re", ".", "findall", "(", "r'[a-zA-Z_]+'", ",", "condition", ")", ")", ":", "exec", "(", "variable", "+", "' = hits_array[\\''", "+", "variable", "+", "'\\']'", ")", "return", "hits_array", "[", "ne", ".", "evaluate", "(", "condition", ")", "]" ]
Selects the hits with condition. E.g.: condition = 'rel_BCID == 7 & event_number < 1000' Parameters ---------- hits_array : numpy.array condition : string A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken. Returns ------- numpy.array hit array with the selceted hits
[ "Selects", "the", "hits", "with", "condition", ".", "E", ".", "g", ".", ":", "condition", "=", "rel_BCID", "==", "7", "&", "event_number", "<", "1000" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L749-L770
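A sketch for select_hits on a toy hit array. The condition mirrors the docstring example but adds explicit parentheses around each comparison so that numexpr operator precedence cannot surprise.

import numpy as np
from pybar.analysis.analysis_utils import select_hits

hits = np.array([(100, 7), (2000, 7), (100, 5)],
                dtype=[('event_number', np.int64), ('rel_BCID', np.uint8)])
selected = select_hits(hits, condition='(rel_BCID == 7) & (event_number < 1000)')
print(selected)  # only the (100, 7) hit should remain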
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_hits_in_events
def get_hits_in_events(hits_array, events, assume_sorted=True, condition=None): '''Selects the hits that occurred in events and optional selection criterion. If a event range can be defined use the get_data_in_event_range function. It is much faster. Parameters ---------- hits_array : numpy.array events : array assume_sorted : bool Is true if the events to select are sorted from low to high value. Increases speed by 35%. condition : string A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken. Returns ------- numpy.array hit array with the hits in events. ''' logging.debug("Calculate hits that exists in the given %d events." % len(events)) if assume_sorted: events, _ = reduce_sorted_to_intersect(events, hits_array['event_number']) # reduce the event number range to the max min event number of the given hits to save time if events.shape[0] == 0: # if there is not a single selected hit return hits_array[0:0] try: if assume_sorted: selection = analysis_utils.in1d_events(hits_array['event_number'], events) else: logging.warning('Events are usually sorted. Are you sure you want this?') selection = np.in1d(hits_array['event_number'], events) if condition is None: hits_in_events = hits_array[selection] else: # bad hack to be able to use numexpr for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits_array[\'' + variable + '\']') hits_in_events = hits_array[ne.evaluate(condition + ' & selection')] except MemoryError: logging.error('There are too many hits to do in RAM operations. Consider decreasing chunk size and use the write_hits_in_events function instead.') raise MemoryError return hits_in_events
python
def get_hits_in_events(hits_array, events, assume_sorted=True, condition=None): '''Selects the hits that occurred in events and optional selection criterion. If a event range can be defined use the get_data_in_event_range function. It is much faster. Parameters ---------- hits_array : numpy.array events : array assume_sorted : bool Is true if the events to select are sorted from low to high value. Increases speed by 35%. condition : string A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken. Returns ------- numpy.array hit array with the hits in events. ''' logging.debug("Calculate hits that exists in the given %d events." % len(events)) if assume_sorted: events, _ = reduce_sorted_to_intersect(events, hits_array['event_number']) # reduce the event number range to the max min event number of the given hits to save time if events.shape[0] == 0: # if there is not a single selected hit return hits_array[0:0] try: if assume_sorted: selection = analysis_utils.in1d_events(hits_array['event_number'], events) else: logging.warning('Events are usually sorted. Are you sure you want this?') selection = np.in1d(hits_array['event_number'], events) if condition is None: hits_in_events = hits_array[selection] else: # bad hack to be able to use numexpr for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits_array[\'' + variable + '\']') hits_in_events = hits_array[ne.evaluate(condition + ' & selection')] except MemoryError: logging.error('There are too many hits to do in RAM operations. Consider decreasing chunk size and use the write_hits_in_events function instead.') raise MemoryError return hits_in_events
[ "def", "get_hits_in_events", "(", "hits_array", ",", "events", ",", "assume_sorted", "=", "True", ",", "condition", "=", "None", ")", ":", "logging", ".", "debug", "(", "\"Calculate hits that exists in the given %d events.\"", "%", "len", "(", "events", ")", ")", "if", "assume_sorted", ":", "events", ",", "_", "=", "reduce_sorted_to_intersect", "(", "events", ",", "hits_array", "[", "'event_number'", "]", ")", "# reduce the event number range to the max min event number of the given hits to save time", "if", "events", ".", "shape", "[", "0", "]", "==", "0", ":", "# if there is not a single selected hit", "return", "hits_array", "[", "0", ":", "0", "]", "try", ":", "if", "assume_sorted", ":", "selection", "=", "analysis_utils", ".", "in1d_events", "(", "hits_array", "[", "'event_number'", "]", ",", "events", ")", "else", ":", "logging", ".", "warning", "(", "'Events are usually sorted. Are you sure you want this?'", ")", "selection", "=", "np", ".", "in1d", "(", "hits_array", "[", "'event_number'", "]", ",", "events", ")", "if", "condition", "is", "None", ":", "hits_in_events", "=", "hits_array", "[", "selection", "]", "else", ":", "# bad hack to be able to use numexpr", "for", "variable", "in", "set", "(", "re", ".", "findall", "(", "r'[a-zA-Z_]+'", ",", "condition", ")", ")", ":", "exec", "(", "variable", "+", "' = hits_array[\\''", "+", "variable", "+", "'\\']'", ")", "hits_in_events", "=", "hits_array", "[", "ne", ".", "evaluate", "(", "condition", "+", "' & selection'", ")", "]", "except", "MemoryError", ":", "logging", ".", "error", "(", "'There are too many hits to do in RAM operations. Consider decreasing chunk size and use the write_hits_in_events function instead.'", ")", "raise", "MemoryError", "return", "hits_in_events" ]
Selects the hits that occurred in events and optional selection criterion. If a event range can be defined use the get_data_in_event_range function. It is much faster. Parameters ---------- hits_array : numpy.array events : array assume_sorted : bool Is true if the events to select are sorted from low to high value. Increases speed by 35%. condition : string A condition that is applied to the hits in numexpr. Only if the expression evaluates to True the hit is taken. Returns ------- numpy.array hit array with the hits in events.
[ "Selects", "the", "hits", "that", "occurred", "in", "events", "and", "optional", "selection", "criterion", ".", "If", "a", "event", "range", "can", "be", "defined", "use", "the", "get_data_in_event_range", "function", ".", "It", "is", "much", "faster", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L773-L814
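A sketch for get_hits_in_events on a toy hit array. The fast sorted path calls the compiled analysis_utils.in1d_events helper shipped with pyBAR, so a full pyBAR installation is assumed.

import numpy as np
from pybar.analysis.analysis_utils import get_hits_in_events

hits = np.array([(0, 1), (1, 2), (1, 3), (4, 4)],
                dtype=[('event_number', np.int64), ('tot', np.uint8)])
selected = get_hits_in_events(hits, events=np.array([1, 4]))
print(selected['event_number'])  # expected [1 1 4]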
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_hits_of_scan_parameter
def get_hits_of_scan_parameter(input_file_hits, scan_parameters=None, try_speedup=False, chunk_size=10000000): '''Takes the hit table of a hdf5 file and returns hits in chunks for each unique combination of scan_parameters. Yields the hits in chunks, since they usually do not fit into memory. Parameters ---------- input_file_hits : pytable hdf5 file Has to include a hits node scan_parameters : iterable with strings try_speedup : bool If true a speed up by searching for the event numbers in the data is done. If the event numbers are not in the data this slows down the search. chunk_size : int How many rows of data are read into ram. Returns ------- Yields tuple, numpy.array Actual scan parameter tuple, hit array with the hits of a chunk of the given scan parameter tuple ''' with tb.open_file(input_file_hits, mode="r+") as in_file_h5: hit_table = in_file_h5.root.Hits meta_data = in_file_h5.root.meta_data[:] meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameters) parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameters) event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number']) # get the event number ranges for the different scan parameter settings index_event_number(hit_table) # create a event_numer index to select the hits by their event number fast, no needed but important for speed up # # variables for read speed up index = 0 # index where to start the read out of the hit table, 0 at the beginning, increased during looping best_chunk_size = chunk_size # number of hits to copy to RAM during looping, the optimal chunk size is determined during looping # loop over the selected events for parameter_index, (start_event_number, stop_event_number) in enumerate(event_number_ranges): logging.debug('Read hits for ' + str(scan_parameters) + ' = ' + str(parameter_values[parameter_index])) readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given for hits, index in data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, try_speedup=try_speedup, chunk_size=best_chunk_size): yield parameter_values[parameter_index], hits readout_hit_len += hits.shape[0] best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size and int(1.05 * readout_hit_len) > 1e3 else chunk_size
python
def get_hits_of_scan_parameter(input_file_hits, scan_parameters=None, try_speedup=False, chunk_size=10000000): '''Takes the hit table of a hdf5 file and returns hits in chunks for each unique combination of scan_parameters. Yields the hits in chunks, since they usually do not fit into memory. Parameters ---------- input_file_hits : pytable hdf5 file Has to include a hits node scan_parameters : iterable with strings try_speedup : bool If true a speed up by searching for the event numbers in the data is done. If the event numbers are not in the data this slows down the search. chunk_size : int How many rows of data are read into ram. Returns ------- Yields tuple, numpy.array Actual scan parameter tuple, hit array with the hits of a chunk of the given scan parameter tuple ''' with tb.open_file(input_file_hits, mode="r+") as in_file_h5: hit_table = in_file_h5.root.Hits meta_data = in_file_h5.root.meta_data[:] meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameters) parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameters) event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number']) # get the event number ranges for the different scan parameter settings index_event_number(hit_table) # create a event_numer index to select the hits by their event number fast, no needed but important for speed up # # variables for read speed up index = 0 # index where to start the read out of the hit table, 0 at the beginning, increased during looping best_chunk_size = chunk_size # number of hits to copy to RAM during looping, the optimal chunk size is determined during looping # loop over the selected events for parameter_index, (start_event_number, stop_event_number) in enumerate(event_number_ranges): logging.debug('Read hits for ' + str(scan_parameters) + ' = ' + str(parameter_values[parameter_index])) readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given for hits, index in data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, try_speedup=try_speedup, chunk_size=best_chunk_size): yield parameter_values[parameter_index], hits readout_hit_len += hits.shape[0] best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size and int(1.05 * readout_hit_len) > 1e3 else chunk_size
[ "def", "get_hits_of_scan_parameter", "(", "input_file_hits", ",", "scan_parameters", "=", "None", ",", "try_speedup", "=", "False", ",", "chunk_size", "=", "10000000", ")", ":", "with", "tb", ".", "open_file", "(", "input_file_hits", ",", "mode", "=", "\"r+\"", ")", "as", "in_file_h5", ":", "hit_table", "=", "in_file_h5", ".", "root", ".", "Hits", "meta_data", "=", "in_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "meta_data_table_at_scan_parameter", "=", "get_unique_scan_parameter_combinations", "(", "meta_data", ",", "scan_parameters", "=", "scan_parameters", ")", "parameter_values", "=", "get_scan_parameters_table_from_meta_data", "(", "meta_data_table_at_scan_parameter", ",", "scan_parameters", ")", "event_number_ranges", "=", "get_ranges_from_array", "(", "meta_data_table_at_scan_parameter", "[", "'event_number'", "]", ")", "# get the event number ranges for the different scan parameter settings", "index_event_number", "(", "hit_table", ")", "# create a event_numer index to select the hits by their event number fast, no needed but important for speed up", "#", "# variables for read speed up", "index", "=", "0", "# index where to start the read out of the hit table, 0 at the beginning, increased during looping", "best_chunk_size", "=", "chunk_size", "# number of hits to copy to RAM during looping, the optimal chunk size is determined during looping", "# loop over the selected events", "for", "parameter_index", ",", "(", "start_event_number", ",", "stop_event_number", ")", "in", "enumerate", "(", "event_number_ranges", ")", ":", "logging", ".", "debug", "(", "'Read hits for '", "+", "str", "(", "scan_parameters", ")", "+", "' = '", "+", "str", "(", "parameter_values", "[", "parameter_index", "]", ")", ")", "readout_hit_len", "=", "0", "# variable to calculate a optimal chunk size value from the number of hits for speed up", "# loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given", "for", "hits", ",", "index", "in", "data_aligned_at_events", "(", "hit_table", ",", "start_event_number", "=", "start_event_number", ",", "stop_event_number", "=", "stop_event_number", ",", "start_index", "=", "index", ",", "try_speedup", "=", "try_speedup", ",", "chunk_size", "=", "best_chunk_size", ")", ":", "yield", "parameter_values", "[", "parameter_index", "]", ",", "hits", "readout_hit_len", "+=", "hits", ".", "shape", "[", "0", "]", "best_chunk_size", "=", "int", "(", "1.5", "*", "readout_hit_len", ")", "if", "int", "(", "1.05", "*", "readout_hit_len", ")", "<", "chunk_size", "and", "int", "(", "1.05", "*", "readout_hit_len", ")", ">", "1e3", "else", "chunk_size" ]
Takes the hit table of a hdf5 file and returns hits in chunks for each unique combination of scan_parameters. Yields the hits in chunks, since they usually do not fit into memory. Parameters ---------- input_file_hits : pytable hdf5 file Has to include a hits node scan_parameters : iterable with strings try_speedup : bool If true a speed up by searching for the event numbers in the data is done. If the event numbers are not in the data this slows down the search. chunk_size : int How many rows of data are read into ram. Returns ------- Yields tuple, numpy.array Actual scan parameter tuple, hit array with the hits of a chunk of the given scan parameter tuple
[ "Takes", "the", "hit", "table", "of", "a", "hdf5", "file", "and", "returns", "hits", "in", "chunks", "for", "each", "unique", "combination", "of", "scan_parameters", ".", "Yields", "the", "hits", "in", "chunks", "since", "they", "usually", "do", "not", "fit", "into", "memory", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L817-L859
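A sketch of iterating hits per scan parameter with get_hits_of_scan_parameter. The file name is a placeholder for an interpreted pyBAR hit file containing Hits and meta_data nodes; the function opens it in r+ mode because it may create an event_number index.

from pybar.analysis.analysis_utils import get_hits_of_scan_parameter

# 'scan_interpreted.h5' is an assumed file name, not taken from the record above
for parameter_values, hits in get_hits_of_scan_parameter('scan_interpreted.h5', scan_parameters=['PlsrDAC'], chunk_size=1000000):
    print(parameter_values, hits.shape[0])  # scan parameter value(s) and number of hits in this chunk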
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_data_in_event_range
def get_data_in_event_range(array, event_start=None, event_stop=None, assume_sorted=True): '''Selects the data (rows of a table) that occurred in the given event range [event_start, event_stop[ Parameters ---------- array : numpy.array event_start : int, None event_stop : int, None assume_sorted : bool Set to true if the hits are sorted by the event_number. Increases speed. Returns ------- numpy.array hit array with the hits in the event range. ''' logging.debug("Calculate data of the the given event range [" + str(event_start) + ", " + str(event_stop) + "[") event_number = array['event_number'] if assume_sorted: data_event_start = event_number[0] data_event_stop = event_number[-1] if (event_start is not None and event_stop is not None) and (data_event_stop < event_start or data_event_start > event_stop or event_start == event_stop): # special case, no intersection at all return array[0:0] # get min/max indices with values that are also in the other array if event_start is None: min_index_data = 0 else: min_index_data = np.argmin(event_number < event_start) if event_stop is None: max_index_data = event_number.shape[0] else: max_index_data = np.argmax(event_number >= event_stop) if min_index_data < 0: min_index_data = 0 if max_index_data == 0 or max_index_data > event_number.shape[0]: max_index_data = event_number.shape[0] return array[min_index_data:max_index_data] else: return array[ne.evaluate('event_number >= event_start & event_number < event_stop')]
python
def get_data_in_event_range(array, event_start=None, event_stop=None, assume_sorted=True): '''Selects the data (rows of a table) that occurred in the given event range [event_start, event_stop[ Parameters ---------- array : numpy.array event_start : int, None event_stop : int, None assume_sorted : bool Set to true if the hits are sorted by the event_number. Increases speed. Returns ------- numpy.array hit array with the hits in the event range. ''' logging.debug("Calculate data of the the given event range [" + str(event_start) + ", " + str(event_stop) + "[") event_number = array['event_number'] if assume_sorted: data_event_start = event_number[0] data_event_stop = event_number[-1] if (event_start is not None and event_stop is not None) and (data_event_stop < event_start or data_event_start > event_stop or event_start == event_stop): # special case, no intersection at all return array[0:0] # get min/max indices with values that are also in the other array if event_start is None: min_index_data = 0 else: min_index_data = np.argmin(event_number < event_start) if event_stop is None: max_index_data = event_number.shape[0] else: max_index_data = np.argmax(event_number >= event_stop) if min_index_data < 0: min_index_data = 0 if max_index_data == 0 or max_index_data > event_number.shape[0]: max_index_data = event_number.shape[0] return array[min_index_data:max_index_data] else: return array[ne.evaluate('event_number >= event_start & event_number < event_stop')]
[ "def", "get_data_in_event_range", "(", "array", ",", "event_start", "=", "None", ",", "event_stop", "=", "None", ",", "assume_sorted", "=", "True", ")", ":", "logging", ".", "debug", "(", "\"Calculate data of the the given event range [\"", "+", "str", "(", "event_start", ")", "+", "\", \"", "+", "str", "(", "event_stop", ")", "+", "\"[\"", ")", "event_number", "=", "array", "[", "'event_number'", "]", "if", "assume_sorted", ":", "data_event_start", "=", "event_number", "[", "0", "]", "data_event_stop", "=", "event_number", "[", "-", "1", "]", "if", "(", "event_start", "is", "not", "None", "and", "event_stop", "is", "not", "None", ")", "and", "(", "data_event_stop", "<", "event_start", "or", "data_event_start", ">", "event_stop", "or", "event_start", "==", "event_stop", ")", ":", "# special case, no intersection at all", "return", "array", "[", "0", ":", "0", "]", "# get min/max indices with values that are also in the other array", "if", "event_start", "is", "None", ":", "min_index_data", "=", "0", "else", ":", "min_index_data", "=", "np", ".", "argmin", "(", "event_number", "<", "event_start", ")", "if", "event_stop", "is", "None", ":", "max_index_data", "=", "event_number", ".", "shape", "[", "0", "]", "else", ":", "max_index_data", "=", "np", ".", "argmax", "(", "event_number", ">=", "event_stop", ")", "if", "min_index_data", "<", "0", ":", "min_index_data", "=", "0", "if", "max_index_data", "==", "0", "or", "max_index_data", ">", "event_number", ".", "shape", "[", "0", "]", ":", "max_index_data", "=", "event_number", ".", "shape", "[", "0", "]", "return", "array", "[", "min_index_data", ":", "max_index_data", "]", "else", ":", "return", "array", "[", "ne", ".", "evaluate", "(", "'event_number >= event_start & event_number < event_stop'", ")", "]" ]
Selects the data (rows of a table) that occurred in the given event range [event_start, event_stop[ Parameters ---------- array : numpy.array event_start : int, None event_stop : int, None assume_sorted : bool Set to true if the hits are sorted by the event_number. Increases speed. Returns ------- numpy.array hit array with the hits in the event range.
[ "Selects", "the", "data", "(", "rows", "of", "a", "table", ")", "that", "occurred", "in", "the", "given", "event", "range", "[", "event_start", "event_stop", "[" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L862-L903
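A sketch for get_data_in_event_range on a sorted toy array; event_stop is exclusive, matching the half-open interval in the docstring.

import numpy as np
from pybar.analysis.analysis_utils import get_data_in_event_range

hits = np.array([(0,), (1,), (3,), (7,)], dtype=[('event_number', np.int64)])
selected = get_data_in_event_range(hits, event_start=1, event_stop=7)
print(selected['event_number'])  # [1 3], event number 7 is excluded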
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
write_hits_in_events
def write_hits_in_events(hit_table_in, hit_table_out, events, start_hit_word=0, chunk_size=5000000, condition=None): '''Selects the hits that occurred in events and writes them to a pytable. This function reduces the in RAM operations and has to be used if the get_hits_in_events function raises a memory error. Also a condition can be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out events : array like defines the events to be written from hit_table_in to hit_table_out. They do not have to exists at all. start_hit_word: int Index of the first hit word to be analyzed. Used for speed up. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, too big numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. Only if the expression evaluates to True the hit is taken. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events. ''' if len(events) > 0: # needed to avoid crash min_event = np.amin(events) max_event = np.amax(events) logging.debug("Write hits from hit number >= %d that exists in the selected %d events with %d <= event number <= %d into a new hit table." % (start_hit_word, len(events), min_event, max_event)) table_size = hit_table_in.shape[0] iHit = 0 for iHit in range(start_hit_word, table_size, chunk_size): hits = hit_table_in.read(iHit, iHit + chunk_size) last_event_number = hits[-1]['event_number'] hit_table_out.append(get_hits_in_events(hits, events=events, condition=condition)) if last_event_number > max_event: # speed up, use the fact that the hits are sorted by event_number return iHit return start_hit_word
python
def write_hits_in_events(hit_table_in, hit_table_out, events, start_hit_word=0, chunk_size=5000000, condition=None): '''Selects the hits that occurred in events and writes them to a pytable. This function reduces the in RAM operations and has to be used if the get_hits_in_events function raises a memory error. Also a condition can be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out events : array like defines the events to be written from hit_table_in to hit_table_out. They do not have to exists at all. start_hit_word: int Index of the first hit word to be analyzed. Used for speed up. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, too big numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. Only if the expression evaluates to True the hit is taken. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events. ''' if len(events) > 0: # needed to avoid crash min_event = np.amin(events) max_event = np.amax(events) logging.debug("Write hits from hit number >= %d that exists in the selected %d events with %d <= event number <= %d into a new hit table." % (start_hit_word, len(events), min_event, max_event)) table_size = hit_table_in.shape[0] iHit = 0 for iHit in range(start_hit_word, table_size, chunk_size): hits = hit_table_in.read(iHit, iHit + chunk_size) last_event_number = hits[-1]['event_number'] hit_table_out.append(get_hits_in_events(hits, events=events, condition=condition)) if last_event_number > max_event: # speed up, use the fact that the hits are sorted by event_number return iHit return start_hit_word
[ "def", "write_hits_in_events", "(", "hit_table_in", ",", "hit_table_out", ",", "events", ",", "start_hit_word", "=", "0", ",", "chunk_size", "=", "5000000", ",", "condition", "=", "None", ")", ":", "if", "len", "(", "events", ")", ">", "0", ":", "# needed to avoid crash", "min_event", "=", "np", ".", "amin", "(", "events", ")", "max_event", "=", "np", ".", "amax", "(", "events", ")", "logging", ".", "debug", "(", "\"Write hits from hit number >= %d that exists in the selected %d events with %d <= event number <= %d into a new hit table.\"", "%", "(", "start_hit_word", ",", "len", "(", "events", ")", ",", "min_event", ",", "max_event", ")", ")", "table_size", "=", "hit_table_in", ".", "shape", "[", "0", "]", "iHit", "=", "0", "for", "iHit", "in", "range", "(", "start_hit_word", ",", "table_size", ",", "chunk_size", ")", ":", "hits", "=", "hit_table_in", ".", "read", "(", "iHit", ",", "iHit", "+", "chunk_size", ")", "last_event_number", "=", "hits", "[", "-", "1", "]", "[", "'event_number'", "]", "hit_table_out", ".", "append", "(", "get_hits_in_events", "(", "hits", ",", "events", "=", "events", ",", "condition", "=", "condition", ")", ")", "if", "last_event_number", ">", "max_event", ":", "# speed up, use the fact that the hits are sorted by event_number", "return", "iHit", "return", "start_hit_word" ]
Selects the hits that occurred in events and writes them to a pytable. This function reduces the in RAM operations and has to be used if the get_hits_in_events function raises a memory error. Also a condition can be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out events : array like defines the events to be written from hit_table_in to hit_table_out. They do not have to exists at all. start_hit_word: int Index of the first hit word to be analyzed. Used for speed up. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, too big numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. Only if the expression evaluates to True the hit is taken. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events.
[ "Selects", "the", "hits", "that", "occurred", "in", "events", "and", "writes", "them", "to", "a", "pytable", ".", "This", "function", "reduces", "the", "in", "RAM", "operations", "and", "has", "to", "be", "used", "if", "the", "get_hits_in_events", "function", "raises", "a", "memory", "error", ".", "Also", "a", "condition", "can", "be", "set", "to", "select", "hits", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L906-L941
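A minimal usage sketch for write_hits_in_events (not part of the record above): it assumes pyBAR and pytables are installed, that an interpreted hit file named hits_interpreted.h5 with a /Hits node exists, and that the hit table has a 'tot' column for the optional numexpr condition -- the file name, node name, and column name are placeholders.

import numpy as np
import tables as tb
from pybar.analysis import analysis_utils

selected_events = np.array([10, 11, 42], dtype=np.int64)  # events to copy; they do not have to exist in the data

with tb.open_file('hits_interpreted.h5', mode='r') as in_file, tb.open_file('hits_selected.h5', mode='w') as out_file:
    hits_in = in_file.root.Hits  # assumed node name
    hits_out = out_file.create_table(out_file.root, name='Hits', description=hits_in.dtype, title='Selected hits')
    # the returned index can seed the next call when looping over several event selections
    next_start = analysis_utils.write_hits_in_events(hits_in, hits_out, events=selected_events,
                                                     condition='tot > 1')  # 'tot' is an assumed hit column
    hits_out.flush()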
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
write_hits_in_event_range
def write_hits_in_event_range(hit_table_in, hit_table_out, event_start=None, event_stop=None, start_hit_word=0, chunk_size=5000000, condition=None): '''Selects the hits that occurred in given event range [event_start, event_stop[ and write them to a pytable. This function reduces the in RAM operations and has to be used if the get_data_in_event_range function raises a memory error. Also a condition can be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out event_start, event_stop : int, None start/stop event numbers. Stop event number is excluded. If None start/stop is set automatically. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, too big numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. Only if the expression evaluates to True the hit is taken. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events. ''' logging.debug('Write hits that exists in the given event range from + ' + str(event_start) + ' to ' + str(event_stop) + ' into a new hit table') table_size = hit_table_in.shape[0] for iHit in range(0, table_size, chunk_size): hits = hit_table_in.read(iHit, iHit + chunk_size) last_event_number = hits[-1]['event_number'] selected_hits = get_data_in_event_range(hits, event_start=event_start, event_stop=event_stop) if condition is not None: # bad hack to be able to use numexpr for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits[\'' + variable + '\']') selected_hits = selected_hits[ne.evaluate(condition)] hit_table_out.append(selected_hits) if last_event_number > event_stop: # speed up, use the fact that the hits are sorted by event_number return iHit + chunk_size return start_hit_word
python
def write_hits_in_event_range(hit_table_in, hit_table_out, event_start=None, event_stop=None, start_hit_word=0, chunk_size=5000000, condition=None): '''Selects the hits that occurred in given event range [event_start, event_stop[ and write them to a pytable. This function reduces the in RAM operations and has to be used if the get_data_in_event_range function raises a memory error. Also a condition can be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out event_start, event_stop : int, None start/stop event numbers. Stop event number is excluded. If None start/stop is set automatically. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, too big numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. Only if the expression evaluates to True the hit is taken. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events. ''' logging.debug('Write hits that exists in the given event range from + ' + str(event_start) + ' to ' + str(event_stop) + ' into a new hit table') table_size = hit_table_in.shape[0] for iHit in range(0, table_size, chunk_size): hits = hit_table_in.read(iHit, iHit + chunk_size) last_event_number = hits[-1]['event_number'] selected_hits = get_data_in_event_range(hits, event_start=event_start, event_stop=event_stop) if condition is not None: # bad hack to be able to use numexpr for variable in set(re.findall(r'[a-zA-Z_]+', condition)): exec(variable + ' = hits[\'' + variable + '\']') selected_hits = selected_hits[ne.evaluate(condition)] hit_table_out.append(selected_hits) if last_event_number > event_stop: # speed up, use the fact that the hits are sorted by event_number return iHit + chunk_size return start_hit_word
[ "def", "write_hits_in_event_range", "(", "hit_table_in", ",", "hit_table_out", ",", "event_start", "=", "None", ",", "event_stop", "=", "None", ",", "start_hit_word", "=", "0", ",", "chunk_size", "=", "5000000", ",", "condition", "=", "None", ")", ":", "logging", ".", "debug", "(", "'Write hits that exists in the given event range from + '", "+", "str", "(", "event_start", ")", "+", "' to '", "+", "str", "(", "event_stop", ")", "+", "' into a new hit table'", ")", "table_size", "=", "hit_table_in", ".", "shape", "[", "0", "]", "for", "iHit", "in", "range", "(", "0", ",", "table_size", ",", "chunk_size", ")", ":", "hits", "=", "hit_table_in", ".", "read", "(", "iHit", ",", "iHit", "+", "chunk_size", ")", "last_event_number", "=", "hits", "[", "-", "1", "]", "[", "'event_number'", "]", "selected_hits", "=", "get_data_in_event_range", "(", "hits", ",", "event_start", "=", "event_start", ",", "event_stop", "=", "event_stop", ")", "if", "condition", "is", "not", "None", ":", "# bad hack to be able to use numexpr", "for", "variable", "in", "set", "(", "re", ".", "findall", "(", "r'[a-zA-Z_]+'", ",", "condition", ")", ")", ":", "exec", "(", "variable", "+", "' = hits[\\''", "+", "variable", "+", "'\\']'", ")", "selected_hits", "=", "selected_hits", "[", "ne", ".", "evaluate", "(", "condition", ")", "]", "hit_table_out", ".", "append", "(", "selected_hits", ")", "if", "last_event_number", ">", "event_stop", ":", "# speed up, use the fact that the hits are sorted by event_number", "return", "iHit", "+", "chunk_size", "return", "start_hit_word" ]
Selects the hits that occurred in the given event range [event_start, event_stop[ and writes them to a pytable. This function reduces the in-RAM operations and has to be used if the get_data_in_event_range function raises a memory error. A condition can also be set to select hits. Parameters ---------- hit_table_in : pytable.table hit_table_out : pytable.table functions need to be able to write to hit_table_out event_start, event_stop : int, None start/stop event numbers. The stop event number is excluded. If None, start/stop is set automatically. chunk_size : int defines how many hits are analyzed in RAM. Bigger numbers increase the speed, but too large numbers let the program crash with a memory error. condition : string A condition that is applied to the hits in numexpr style. The hit is taken only if the expression evaluates to True. Returns ------- start_hit_word: int Index of the last hit word analyzed. Used to speed up the next call of write_hits_in_events.
[ "Selects", "the", "hits", "that", "occurred", "in", "given", "event", "range", "[", "event_start", "event_stop", "[", "and", "write", "them", "to", "a", "pytable", ".", "This", "function", "reduces", "the", "in", "RAM", "operations", "and", "has", "to", "be", "used", "if", "the", "get_data_in_event_range", "function", "raises", "a", "memory", "error", ".", "Also", "a", "condition", "can", "be", "set", "to", "select", "hits", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L944-L979
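A similar sketch for write_hits_in_event_range under the same assumptions (placeholder file and node names); it copies the hits of events with 0 <= event_number < 10000 into a new table. The stop event number is excluded, as stated in the docstring.

import tables as tb
from pybar.analysis import analysis_utils

with tb.open_file('hits_interpreted.h5', mode='r') as in_file, tb.open_file('hits_event_range.h5', mode='w') as out_file:
    hits_in = in_file.root.Hits  # assumed node name
    hits_out = out_file.create_table(out_file.root, name='Hits', description=hits_in.dtype, title='Hits in event range')
    analysis_utils.write_hits_in_event_range(hits_in, hits_out, event_start=0, event_stop=10000)
    hits_out.flush()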
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_events_with_n_cluster
def get_events_with_n_cluster(event_number, condition='n_cluster==1'): '''Selects the events with a certain number of cluster. Parameters ---------- event_number : numpy.array Returns ------- numpy.array ''' logging.debug("Calculate events with clusters where " + condition) n_cluster_in_events = analysis_utils.get_n_cluster_in_events(event_number) n_cluster = n_cluster_in_events[:, 1] # return np.take(n_cluster_in_events, ne.evaluate(condition), axis=0) # does not return 1d, bug? return n_cluster_in_events[ne.evaluate(condition), 0]
python
def get_events_with_n_cluster(event_number, condition='n_cluster==1'): '''Selects the events with a certain number of cluster. Parameters ---------- event_number : numpy.array Returns ------- numpy.array ''' logging.debug("Calculate events with clusters where " + condition) n_cluster_in_events = analysis_utils.get_n_cluster_in_events(event_number) n_cluster = n_cluster_in_events[:, 1] # return np.take(n_cluster_in_events, ne.evaluate(condition), axis=0) # does not return 1d, bug? return n_cluster_in_events[ne.evaluate(condition), 0]
[ "def", "get_events_with_n_cluster", "(", "event_number", ",", "condition", "=", "'n_cluster==1'", ")", ":", "logging", ".", "debug", "(", "\"Calculate events with clusters where \"", "+", "condition", ")", "n_cluster_in_events", "=", "analysis_utils", ".", "get_n_cluster_in_events", "(", "event_number", ")", "n_cluster", "=", "n_cluster_in_events", "[", ":", ",", "1", "]", "# return np.take(n_cluster_in_events, ne.evaluate(condition), axis=0) # does not return 1d, bug?", "return", "n_cluster_in_events", "[", "ne", ".", "evaluate", "(", "condition", ")", ",", "0", "]" ]
Selects the events with a certain number of clusters. Parameters ---------- event_number : numpy.array Returns ------- numpy.array
[ "Selects", "the", "events", "with", "a", "certain", "number", "of", "cluster", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L982-L998
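A small, self-contained sketch of get_events_with_n_cluster on synthetic data; it relies on get_n_cluster_in_events from the same module counting the clusters per event, which is an assumption here and not shown in this record.

import numpy as np
from pybar.analysis import analysis_utils

# event_number of each cluster (sorted): event 1 has two clusters, events 0 and 2 have one each
cluster_event_number = np.array([0, 1, 1, 2], dtype=np.int64)
single_cluster_events = analysis_utils.get_events_with_n_cluster(cluster_event_number, condition='n_cluster==1')
print(single_cluster_events)  # expected: [0 2]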
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_events_with_cluster_size
def get_events_with_cluster_size(event_number, cluster_size, condition='cluster_size==1'): '''Selects the events with cluster of a given cluster size. Parameters ---------- event_number : numpy.array cluster_size : numpy.array condition : string Returns ------- numpy.array ''' logging.debug("Calculate events with clusters with " + condition) return np.unique(event_number[ne.evaluate(condition)])
python
def get_events_with_cluster_size(event_number, cluster_size, condition='cluster_size==1'): '''Selects the events with cluster of a given cluster size. Parameters ---------- event_number : numpy.array cluster_size : numpy.array condition : string Returns ------- numpy.array ''' logging.debug("Calculate events with clusters with " + condition) return np.unique(event_number[ne.evaluate(condition)])
[ "def", "get_events_with_cluster_size", "(", "event_number", ",", "cluster_size", ",", "condition", "=", "'cluster_size==1'", ")", ":", "logging", ".", "debug", "(", "\"Calculate events with clusters with \"", "+", "condition", ")", "return", "np", ".", "unique", "(", "event_number", "[", "ne", ".", "evaluate", "(", "condition", ")", "]", ")" ]
Selects the events with clusters of a given cluster size. Parameters ---------- event_number : numpy.array cluster_size : numpy.array condition : string Returns ------- numpy.array
[ "Selects", "the", "events", "with", "cluster", "of", "a", "given", "cluster", "size", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1001-L1016
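A self-contained sketch of get_events_with_cluster_size with synthetic arrays; the condition string is evaluated by numexpr against the cluster_size argument.

import numpy as np
from pybar.analysis import analysis_utils

cluster_event_number = np.array([0, 0, 1, 2], dtype=np.int64)  # event number per cluster
cluster_size = np.array([1, 3, 2, 1], dtype=np.uint16)         # size of each cluster
events = analysis_utils.get_events_with_cluster_size(cluster_event_number, cluster_size, condition='cluster_size==1')
print(events)  # events that contain at least one single-hit cluster: [0 2]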
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_events_with_error_code
def get_events_with_error_code(event_number, event_status, select_mask=0b1111111111111111, condition=0b0000000000000000): '''Selects the events with a certain error code. Parameters ---------- event_number : numpy.array event_status : numpy.array select_mask : int The mask that selects the event error code to check. condition : int The value the selected event error code should have. Returns ------- numpy.array ''' logging.debug("Calculate events with certain error code") return np.unique(event_number[event_status & select_mask == condition])
python
def get_events_with_error_code(event_number, event_status, select_mask=0b1111111111111111, condition=0b0000000000000000): '''Selects the events with a certain error code. Parameters ---------- event_number : numpy.array event_status : numpy.array select_mask : int The mask that selects the event error code to check. condition : int The value the selected event error code should have. Returns ------- numpy.array ''' logging.debug("Calculate events with certain error code") return np.unique(event_number[event_status & select_mask == condition])
[ "def", "get_events_with_error_code", "(", "event_number", ",", "event_status", ",", "select_mask", "=", "0b1111111111111111", ",", "condition", "=", "0b0000000000000000", ")", ":", "logging", ".", "debug", "(", "\"Calculate events with certain error code\"", ")", "return", "np", ".", "unique", "(", "event_number", "[", "event_status", "&", "select_mask", "==", "condition", "]", ")" ]
Selects the events with a certain error code. Parameters ---------- event_number : numpy.array event_status : numpy.array select_mask : int The mask that selects the event error code to check. condition : int The value the selected event error code should have. Returns ------- numpy.array
[ "Selects", "the", "events", "with", "a", "certain", "error", "code", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1019-L1037
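A self-contained sketch of get_events_with_error_code; select_mask picks which status bits to inspect and condition is the value those bits must have (here: the two lowest bits must be zero). The meaning of the individual status bits is detector specific and not defined in this record.

import numpy as np
from pybar.analysis import analysis_utils

event_number = np.array([0, 1, 2, 3], dtype=np.int64)
event_status = np.array([0b00, 0b01, 0b10, 0b00], dtype=np.uint16)
good_events = analysis_utils.get_events_with_error_code(event_number, event_status, select_mask=0b11, condition=0b00)
print(good_events)  # events whose two lowest status bits are zero: [0 3]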
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_scan_parameter
def get_scan_parameter(meta_data_array, unique=True): '''Takes the numpy meta data array and returns the different scan parameter settings and the name aligned in a dictionary Parameters ---------- meta_data_array : numpy.ndarray unique: boolean If true only unique values for each scan parameter are returned Returns ------- python.dict{string, numpy.Histogram}: A dictionary with the scan parameter name/values pairs ''' try: last_not_parameter_column = meta_data_array.dtype.names.index('error_code') # for interpreted meta_data except ValueError: last_not_parameter_column = meta_data_array.dtype.names.index('error') # for raw data file meta_data if last_not_parameter_column == len(meta_data_array.dtype.names) - 1: # no meta_data found return scan_parameters = collections.OrderedDict() for scan_par_name in meta_data_array.dtype.names[4:]: # scan parameters are in columns 5 (= index 4) and above scan_parameters[scan_par_name] = np.unique(meta_data_array[scan_par_name]) if unique else meta_data_array[scan_par_name] return scan_parameters
python
def get_scan_parameter(meta_data_array, unique=True): '''Takes the numpy meta data array and returns the different scan parameter settings and the name aligned in a dictionary Parameters ---------- meta_data_array : numpy.ndarray unique: boolean If true only unique values for each scan parameter are returned Returns ------- python.dict{string, numpy.Histogram}: A dictionary with the scan parameter name/values pairs ''' try: last_not_parameter_column = meta_data_array.dtype.names.index('error_code') # for interpreted meta_data except ValueError: last_not_parameter_column = meta_data_array.dtype.names.index('error') # for raw data file meta_data if last_not_parameter_column == len(meta_data_array.dtype.names) - 1: # no meta_data found return scan_parameters = collections.OrderedDict() for scan_par_name in meta_data_array.dtype.names[4:]: # scan parameters are in columns 5 (= index 4) and above scan_parameters[scan_par_name] = np.unique(meta_data_array[scan_par_name]) if unique else meta_data_array[scan_par_name] return scan_parameters
[ "def", "get_scan_parameter", "(", "meta_data_array", ",", "unique", "=", "True", ")", ":", "try", ":", "last_not_parameter_column", "=", "meta_data_array", ".", "dtype", ".", "names", ".", "index", "(", "'error_code'", ")", "# for interpreted meta_data", "except", "ValueError", ":", "last_not_parameter_column", "=", "meta_data_array", ".", "dtype", ".", "names", ".", "index", "(", "'error'", ")", "# for raw data file meta_data", "if", "last_not_parameter_column", "==", "len", "(", "meta_data_array", ".", "dtype", ".", "names", ")", "-", "1", ":", "# no meta_data found", "return", "scan_parameters", "=", "collections", ".", "OrderedDict", "(", ")", "for", "scan_par_name", "in", "meta_data_array", ".", "dtype", ".", "names", "[", "4", ":", "]", ":", "# scan parameters are in columns 5 (= index 4) and above", "scan_parameters", "[", "scan_par_name", "]", "=", "np", ".", "unique", "(", "meta_data_array", "[", "scan_par_name", "]", ")", "if", "unique", "else", "meta_data_array", "[", "scan_par_name", "]", "return", "scan_parameters" ]
Takes the numpy meta data array and returns the different scan parameter settings with their names aligned in a dictionary. Parameters ---------- meta_data_array : numpy.ndarray unique: boolean If True, only unique values for each scan parameter are returned Returns ------- python.dict{string, numpy.Histogram}: A dictionary with the scan parameter name/value pairs
[ "Takes", "the", "numpy", "meta", "data", "array", "and", "returns", "the", "different", "scan", "parameter", "settings", "and", "the", "name", "aligned", "in", "a", "dictionary" ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1040-L1064
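A self-contained sketch of get_scan_parameter with a hand-built meta data array; the first four column names and 'PlsrDAC' are placeholders, the only properties the function relies on are that an 'error_code' column exists and that scan parameters start at column index 4.

import numpy as np
from pybar.analysis import analysis_utils

meta_data = np.array([(0, 0.0, 0.1, 0, 30), (1, 0.1, 0.2, 0, 30), (2, 0.2, 0.3, 0, 40)],
                     dtype=[('event_number', 'i8'), ('timestamp_start', 'f8'), ('timestamp_stop', 'f8'),
                            ('error_code', 'u4'), ('PlsrDAC', 'u4')])  # 'PlsrDAC' stands in for any scan parameter
scan_parameters = analysis_utils.get_scan_parameter(meta_data)
print(scan_parameters)  # OrderedDict([('PlsrDAC', array([30, 40], dtype=uint32))])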
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_scan_parameters_table_from_meta_data
def get_scan_parameters_table_from_meta_data(meta_data_array, scan_parameters=None): '''Takes the meta data array and returns the scan parameter values as a view of a numpy array only containing the parameter data . Parameters ---------- meta_data_array : numpy.ndarray The array with the scan parameters. scan_parameters : list of strings The name of the scan parameters to take. If none all are used. Returns ------- numpy.Histogram ''' if scan_parameters is None: try: last_not_parameter_column = meta_data_array.dtype.names.index('error_code') # for interpreted meta_data except ValueError: return if last_not_parameter_column == len(meta_data_array.dtype.names) - 1: # no meta_data found return # http://stackoverflow.com/questions/15182381/how-to-return-a-view-of-several-columns-in-numpy-structured-array scan_par_data = {name: meta_data_array.dtype.fields[name] for name in meta_data_array.dtype.names[last_not_parameter_column + 1:]} else: scan_par_data = collections.OrderedDict() for name in scan_parameters: scan_par_data[name] = meta_data_array.dtype.fields[name] return np.ndarray(meta_data_array.shape, np.dtype(scan_par_data), meta_data_array, 0, meta_data_array.strides)
python
def get_scan_parameters_table_from_meta_data(meta_data_array, scan_parameters=None): '''Takes the meta data array and returns the scan parameter values as a view of a numpy array only containing the parameter data . Parameters ---------- meta_data_array : numpy.ndarray The array with the scan parameters. scan_parameters : list of strings The name of the scan parameters to take. If none all are used. Returns ------- numpy.Histogram ''' if scan_parameters is None: try: last_not_parameter_column = meta_data_array.dtype.names.index('error_code') # for interpreted meta_data except ValueError: return if last_not_parameter_column == len(meta_data_array.dtype.names) - 1: # no meta_data found return # http://stackoverflow.com/questions/15182381/how-to-return-a-view-of-several-columns-in-numpy-structured-array scan_par_data = {name: meta_data_array.dtype.fields[name] for name in meta_data_array.dtype.names[last_not_parameter_column + 1:]} else: scan_par_data = collections.OrderedDict() for name in scan_parameters: scan_par_data[name] = meta_data_array.dtype.fields[name] return np.ndarray(meta_data_array.shape, np.dtype(scan_par_data), meta_data_array, 0, meta_data_array.strides)
[ "def", "get_scan_parameters_table_from_meta_data", "(", "meta_data_array", ",", "scan_parameters", "=", "None", ")", ":", "if", "scan_parameters", "is", "None", ":", "try", ":", "last_not_parameter_column", "=", "meta_data_array", ".", "dtype", ".", "names", ".", "index", "(", "'error_code'", ")", "# for interpreted meta_data", "except", "ValueError", ":", "return", "if", "last_not_parameter_column", "==", "len", "(", "meta_data_array", ".", "dtype", ".", "names", ")", "-", "1", ":", "# no meta_data found", "return", "# http://stackoverflow.com/questions/15182381/how-to-return-a-view-of-several-columns-in-numpy-structured-array", "scan_par_data", "=", "{", "name", ":", "meta_data_array", ".", "dtype", ".", "fields", "[", "name", "]", "for", "name", "in", "meta_data_array", ".", "dtype", ".", "names", "[", "last_not_parameter_column", "+", "1", ":", "]", "}", "else", ":", "scan_par_data", "=", "collections", ".", "OrderedDict", "(", ")", "for", "name", "in", "scan_parameters", ":", "scan_par_data", "[", "name", "]", "=", "meta_data_array", ".", "dtype", ".", "fields", "[", "name", "]", "return", "np", ".", "ndarray", "(", "meta_data_array", ".", "shape", ",", "np", ".", "dtype", "(", "scan_par_data", ")", ",", "meta_data_array", ",", "0", ",", "meta_data_array", ".", "strides", ")" ]
Takes the meta data array and returns the scan parameter values as a view of a numpy array containing only the parameter data. Parameters ---------- meta_data_array : numpy.ndarray The array with the scan parameters. scan_parameters : list of strings The names of the scan parameters to take. If None, all are used. Returns ------- numpy.Histogram
[ "Takes", "the", "meta", "data", "array", "and", "returns", "the", "scan", "parameter", "values", "as", "a", "view", "of", "a", "numpy", "array", "only", "containing", "the", "parameter", "data", ".", "Parameters", "----------", "meta_data_array", ":", "numpy", ".", "ndarray", "The", "array", "with", "the", "scan", "parameters", ".", "scan_parameters", ":", "list", "of", "strings", "The", "name", "of", "the", "scan", "parameters", "to", "take", ".", "If", "none", "all", "are", "used", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1067-L1095
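A sketch for get_scan_parameters_table_from_meta_data using the same kind of hand-built meta data array (placeholder column names); it returns a structured view holding only the scan parameter column(s), one row per readout.

import numpy as np
from pybar.analysis import analysis_utils

meta_data = np.array([(0, 0.0, 0.1, 0, 30), (1, 0.1, 0.2, 0, 30), (2, 0.2, 0.3, 0, 40)],
                     dtype=[('event_number', 'i8'), ('timestamp_start', 'f8'), ('timestamp_stop', 'f8'),
                            ('error_code', 'u4'), ('PlsrDAC', 'u4')])
scan_par_view = analysis_utils.get_scan_parameters_table_from_meta_data(meta_data, scan_parameters=['PlsrDAC'])
print(scan_par_view['PlsrDAC'])  # per-readout values, not unique: [30 30 40]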
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_scan_parameters_index
def get_scan_parameters_index(scan_parameter): '''Takes the scan parameter array and creates a scan parameter index labeling the unique scan parameter combinations. Parameters ---------- scan_parameter : numpy.ndarray The table with the scan parameters. Returns ------- numpy.Histogram ''' _, index = np.unique(scan_parameter, return_index=True) index = np.sort(index) values = np.array(range(0, len(index)), dtype='i4') index = np.append(index, len(scan_parameter)) counts = np.diff(index) return np.repeat(values, counts)
python
def get_scan_parameters_index(scan_parameter): '''Takes the scan parameter array and creates a scan parameter index labeling the unique scan parameter combinations. Parameters ---------- scan_parameter : numpy.ndarray The table with the scan parameters. Returns ------- numpy.Histogram ''' _, index = np.unique(scan_parameter, return_index=True) index = np.sort(index) values = np.array(range(0, len(index)), dtype='i4') index = np.append(index, len(scan_parameter)) counts = np.diff(index) return np.repeat(values, counts)
[ "def", "get_scan_parameters_index", "(", "scan_parameter", ")", ":", "_", ",", "index", "=", "np", ".", "unique", "(", "scan_parameter", ",", "return_index", "=", "True", ")", "index", "=", "np", ".", "sort", "(", "index", ")", "values", "=", "np", ".", "array", "(", "range", "(", "0", ",", "len", "(", "index", ")", ")", ",", "dtype", "=", "'i4'", ")", "index", "=", "np", ".", "append", "(", "index", ",", "len", "(", "scan_parameter", ")", ")", "counts", "=", "np", ".", "diff", "(", "index", ")", "return", "np", ".", "repeat", "(", "values", ",", "counts", ")" ]
Takes the scan parameter array and creates a scan parameter index labeling the unique scan parameter combinations. Parameters ---------- scan_parameter : numpy.ndarray The table with the scan parameters. Returns ------- numpy.Histogram
[ "Takes", "the", "scan", "parameter", "array", "and", "creates", "a", "scan", "parameter", "index", "labeling", "the", "unique", "scan", "parameter", "combinations", ".", "Parameters", "----------", "scan_parameter", ":", "numpy", ".", "ndarray", "The", "table", "with", "the", "scan", "parameters", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1098-L1114
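A self-contained sketch of get_scan_parameters_index; note that it labels consecutive groups, so the per-readout scan parameter values are expected to be grouped by setting, as they are in a scan.

import numpy as np
from pybar.analysis import analysis_utils

scan_parameter = np.array([30, 30, 30, 40, 40])  # one value per readout, grouped by setting
print(analysis_utils.get_scan_parameters_index(scan_parameter))  # [0 0 0 1 1]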
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
get_unique_scan_parameter_combinations
def get_unique_scan_parameter_combinations(meta_data_array, scan_parameters=None, scan_parameter_columns_only=False): '''Takes the numpy meta data array and returns the first rows with unique combinations of different scan parameter values for selected scan parameters. If selected columns only is true, the returned histogram only contains the selected columns. Parameters ---------- meta_data_array : numpy.ndarray scan_parameters : list of string, None Scan parameter names taken. If None all are used. selected_columns_only : bool Returns ------- numpy.Histogram ''' try: last_not_parameter_column = meta_data_array.dtype.names.index('error_code') # for interpreted meta_data except ValueError: last_not_parameter_column = meta_data_array.dtype.names.index('error') # for raw data file meta_data if last_not_parameter_column == len(meta_data_array.dtype.names) - 1: # no meta_data found return if scan_parameters is None: return unique_row(meta_data_array, use_columns=range(4, len(meta_data_array.dtype.names)), selected_columns_only=scan_parameter_columns_only) else: use_columns = [] for scan_parameter in scan_parameters: try: use_columns.append(meta_data_array.dtype.names.index(scan_parameter)) except ValueError: logging.error('No scan parameter ' + scan_parameter + ' found') raise RuntimeError('Scan parameter not found') return unique_row(meta_data_array, use_columns=use_columns, selected_columns_only=scan_parameter_columns_only)
python
def get_unique_scan_parameter_combinations(meta_data_array, scan_parameters=None, scan_parameter_columns_only=False): '''Takes the numpy meta data array and returns the first rows with unique combinations of different scan parameter values for selected scan parameters. If selected columns only is true, the returned histogram only contains the selected columns. Parameters ---------- meta_data_array : numpy.ndarray scan_parameters : list of string, None Scan parameter names taken. If None all are used. selected_columns_only : bool Returns ------- numpy.Histogram ''' try: last_not_parameter_column = meta_data_array.dtype.names.index('error_code') # for interpreted meta_data except ValueError: last_not_parameter_column = meta_data_array.dtype.names.index('error') # for raw data file meta_data if last_not_parameter_column == len(meta_data_array.dtype.names) - 1: # no meta_data found return if scan_parameters is None: return unique_row(meta_data_array, use_columns=range(4, len(meta_data_array.dtype.names)), selected_columns_only=scan_parameter_columns_only) else: use_columns = [] for scan_parameter in scan_parameters: try: use_columns.append(meta_data_array.dtype.names.index(scan_parameter)) except ValueError: logging.error('No scan parameter ' + scan_parameter + ' found') raise RuntimeError('Scan parameter not found') return unique_row(meta_data_array, use_columns=use_columns, selected_columns_only=scan_parameter_columns_only)
[ "def", "get_unique_scan_parameter_combinations", "(", "meta_data_array", ",", "scan_parameters", "=", "None", ",", "scan_parameter_columns_only", "=", "False", ")", ":", "try", ":", "last_not_parameter_column", "=", "meta_data_array", ".", "dtype", ".", "names", ".", "index", "(", "'error_code'", ")", "# for interpreted meta_data", "except", "ValueError", ":", "last_not_parameter_column", "=", "meta_data_array", ".", "dtype", ".", "names", ".", "index", "(", "'error'", ")", "# for raw data file meta_data", "if", "last_not_parameter_column", "==", "len", "(", "meta_data_array", ".", "dtype", ".", "names", ")", "-", "1", ":", "# no meta_data found", "return", "if", "scan_parameters", "is", "None", ":", "return", "unique_row", "(", "meta_data_array", ",", "use_columns", "=", "range", "(", "4", ",", "len", "(", "meta_data_array", ".", "dtype", ".", "names", ")", ")", ",", "selected_columns_only", "=", "scan_parameter_columns_only", ")", "else", ":", "use_columns", "=", "[", "]", "for", "scan_parameter", "in", "scan_parameters", ":", "try", ":", "use_columns", ".", "append", "(", "meta_data_array", ".", "dtype", ".", "names", ".", "index", "(", "scan_parameter", ")", ")", "except", "ValueError", ":", "logging", ".", "error", "(", "'No scan parameter '", "+", "scan_parameter", "+", "' found'", ")", "raise", "RuntimeError", "(", "'Scan parameter not found'", ")", "return", "unique_row", "(", "meta_data_array", ",", "use_columns", "=", "use_columns", ",", "selected_columns_only", "=", "scan_parameter_columns_only", ")" ]
Takes the numpy meta data array and returns the first rows with unique combinations of different scan parameter values for the selected scan parameters. If scan_parameter_columns_only is True, the returned histogram contains only the selected columns. Parameters ---------- meta_data_array : numpy.ndarray scan_parameters : list of string, None Scan parameter names taken. If None, all are used. selected_columns_only : bool Returns ------- numpy.Histogram
[ "Takes", "the", "numpy", "meta", "data", "array", "and", "returns", "the", "first", "rows", "with", "unique", "combinations", "of", "different", "scan", "parameter", "values", "for", "selected", "scan", "parameters", ".", "If", "selected", "columns", "only", "is", "true", "the", "returned", "histogram", "only", "contains", "the", "selected", "columns", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1117-L1149
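A sketch for get_unique_scan_parameter_combinations with the same placeholder meta data; it relies on the unique_row helper from this module, so the exact return layout is assumed here, not verified.

import numpy as np
from pybar.analysis import analysis_utils

meta_data = np.array([(0, 0.0, 0.1, 0, 30), (1, 0.1, 0.2, 0, 30), (2, 0.2, 0.3, 0, 40)],
                     dtype=[('event_number', 'i8'), ('timestamp_start', 'f8'), ('timestamp_stop', 'f8'),
                            ('error_code', 'u4'), ('PlsrDAC', 'u4')])
first_rows = analysis_utils.get_unique_scan_parameter_combinations(meta_data, scan_parameters=['PlsrDAC'])
print(first_rows)  # expected: the first meta data row of each PlsrDAC setting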
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
data_aligned_at_events
def data_aligned_at_events(table, start_event_number=None, stop_event_number=None, start_index=None, stop_index=None, chunk_size=10000000, try_speedup=False, first_event_aligned=True, fail_on_missing_events=True): '''Takes the table with a event_number column and returns chunks with the size up to chunk_size. The chunks are chosen in a way that the events are not splitted. Additional parameters can be set to increase the readout speed. Events between a certain range can be selected. Also the start and the stop indices limiting the table size can be specified to improve performance. The event_number column must be sorted. In case of try_speedup is True, it is important to create an index of event_number column with pytables before using this function. Otherwise the queries are slowed down. Parameters ---------- table : pytables.table The data. start_event_number : int The retruned data contains events with event number >= start_event_number. If None, no limit is set. stop_event_number : int The retruned data contains events with event number < stop_event_number. If None, no limit is set. start_index : int Start index of data. If None, no limit is set. stop_index : int Stop index of data. If None, no limit is set. chunk_size : int Maximum chunk size per read. try_speedup : bool If True, try to reduce the index range to read by searching for the indices of start and stop event number. If these event numbers are usually not in the data this speedup can even slow down the function! The following parameters are not used when try_speedup is True: first_event_aligned : bool If True, assuming that the first event is aligned to the data chunk and will be added. If False, the lowest event number of the first chunk will not be read out. fail_on_missing_events : bool If True, an error is given when start_event_number or stop_event_number is not part of the data. Returns ------- Iterator of tuples Data of the actual data chunk and start index for the next chunk. 
Example ------- start_index = 0 for scan_parameter in scan_parameter_range: start_event_number, stop_event_number = event_select_function(scan_parameter) for data, start_index in data_aligned_at_events(table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=start_index): do_something(data) for data, index in data_aligned_at_events(table): do_something(data) ''' # initialize variables start_index_known = False stop_index_known = False start_index = 0 if start_index is None else start_index stop_index = table.nrows if stop_index is None else stop_index if stop_index < start_index: raise InvalidInputError('Invalid start/stop index') table_max_rows = table.nrows if stop_event_number is not None and start_event_number is not None and stop_event_number < start_event_number: raise InvalidInputError('Invalid start/stop event number') # set start stop indices from the event numbers for fast read if possible; not possible if the given event number does not exist in the data stream if try_speedup and table.colindexed["event_number"]: if start_event_number is not None: start_condition = 'event_number==' + str(start_event_number) start_indices = table.get_where_list(start_condition, start=start_index, stop=stop_index) if start_indices.shape[0] != 0: # set start index if possible start_index = start_indices[0] start_index_known = True if stop_event_number is not None: stop_condition = 'event_number==' + str(stop_event_number) stop_indices = table.get_where_list(stop_condition, start=start_index, stop=stop_index) if stop_indices.shape[0] != 0: # set the stop index if possible, stop index is excluded stop_index = stop_indices[0] stop_index_known = True if start_index_known and stop_index_known and start_index + chunk_size >= stop_index: # special case, one read is enough, data not bigger than one chunk and the indices are known yield table.read(start=start_index, stop=stop_index), stop_index else: # read data in chunks, chunks do not divide events, abort if stop_event_number is reached # search for begin current_start_index = start_index if start_event_number is not None: while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 last_event_in_chunk = array_chunk["event_number"][-1] if last_event_in_chunk < start_event_number: current_start_index = current_start_index + chunk_size # not there yet, continue to next read (assuming sorted events) else: first_event_in_chunk = array_chunk["event_number"][0] # if stop_event_number is not None and first_event_in_chunk >= stop_event_number and start_index != 0 and start_index == current_start_index: # raise InvalidInputError('The stop event %d is missing. Change stop_event_number.' % stop_event_number) if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small. Increase chunk size to fit full event.') if not first_event_aligned and first_event_in_chunk == start_event_number and start_index != 0 and start_index == current_start_index: # first event in first chunk not aligned at index 0, so take next event if fail_on_missing_events: raise InvalidInputError('The start event %d is missing. Change start_event_number.' 
% start_event_number) chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number + 1, side='left') elif fail_on_missing_events and first_event_in_chunk > start_event_number and start_index == current_start_index: raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) elif first_event_aligned and first_event_in_chunk == start_event_number and start_index == current_start_index: chunk_start_index = 0 else: chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number, side='left') if fail_on_missing_events and array_chunk["event_number"][chunk_start_index] != start_event_number and start_index == current_start_index: raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) # if fail_on_missing_events and ((start_index == current_start_index and chunk_start_index == 0 and start_index != 0 and not first_event_aligned) or array_chunk["event_number"][chunk_start_index] != start_event_number): # raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) current_start_index = current_start_index + chunk_start_index # calculate index for next loop break elif not first_event_aligned and start_index != 0: while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 first_event_in_chunk = array_chunk["event_number"][0] last_event_in_chunk = array_chunk["event_number"][-1] if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small. Increase chunk size to fit full event.') chunk_start_index = np.searchsorted(array_chunk["event_number"], first_event_in_chunk + 1, side='left') current_start_index = current_start_index + chunk_start_index if not first_event_in_chunk == last_event_in_chunk: break # data loop while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 first_event_in_chunk = array_chunk["event_number"][0] last_event_in_chunk = array_chunk["event_number"][-1] chunk_start_index = 0 if stop_event_number is None: if current_stop_index == table_max_rows: chunk_stop_index = array_chunk.shape[0] else: chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left') else: if last_event_in_chunk >= stop_event_number: chunk_stop_index = np.searchsorted(array_chunk["event_number"], stop_event_number, side='left') elif current_stop_index == table_max_rows: # this will also add the last event of the table chunk_stop_index = array_chunk.shape[0] else: chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left') nrows = chunk_stop_index - chunk_start_index if nrows == 0: if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small to fit event. Data corruption possible. 
Increase chunk size to read full event.') elif chunk_start_index == 0: # not increasing current_start_index return elif stop_event_number is not None and last_event_in_chunk >= stop_event_number: return else: yield array_chunk[chunk_start_index:chunk_stop_index], current_start_index + nrows + chunk_start_index current_start_index = current_start_index + nrows + chunk_start_index
python
def data_aligned_at_events(table, start_event_number=None, stop_event_number=None, start_index=None, stop_index=None, chunk_size=10000000, try_speedup=False, first_event_aligned=True, fail_on_missing_events=True): '''Takes the table with a event_number column and returns chunks with the size up to chunk_size. The chunks are chosen in a way that the events are not splitted. Additional parameters can be set to increase the readout speed. Events between a certain range can be selected. Also the start and the stop indices limiting the table size can be specified to improve performance. The event_number column must be sorted. In case of try_speedup is True, it is important to create an index of event_number column with pytables before using this function. Otherwise the queries are slowed down. Parameters ---------- table : pytables.table The data. start_event_number : int The retruned data contains events with event number >= start_event_number. If None, no limit is set. stop_event_number : int The retruned data contains events with event number < stop_event_number. If None, no limit is set. start_index : int Start index of data. If None, no limit is set. stop_index : int Stop index of data. If None, no limit is set. chunk_size : int Maximum chunk size per read. try_speedup : bool If True, try to reduce the index range to read by searching for the indices of start and stop event number. If these event numbers are usually not in the data this speedup can even slow down the function! The following parameters are not used when try_speedup is True: first_event_aligned : bool If True, assuming that the first event is aligned to the data chunk and will be added. If False, the lowest event number of the first chunk will not be read out. fail_on_missing_events : bool If True, an error is given when start_event_number or stop_event_number is not part of the data. Returns ------- Iterator of tuples Data of the actual data chunk and start index for the next chunk. 
Example ------- start_index = 0 for scan_parameter in scan_parameter_range: start_event_number, stop_event_number = event_select_function(scan_parameter) for data, start_index in data_aligned_at_events(table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=start_index): do_something(data) for data, index in data_aligned_at_events(table): do_something(data) ''' # initialize variables start_index_known = False stop_index_known = False start_index = 0 if start_index is None else start_index stop_index = table.nrows if stop_index is None else stop_index if stop_index < start_index: raise InvalidInputError('Invalid start/stop index') table_max_rows = table.nrows if stop_event_number is not None and start_event_number is not None and stop_event_number < start_event_number: raise InvalidInputError('Invalid start/stop event number') # set start stop indices from the event numbers for fast read if possible; not possible if the given event number does not exist in the data stream if try_speedup and table.colindexed["event_number"]: if start_event_number is not None: start_condition = 'event_number==' + str(start_event_number) start_indices = table.get_where_list(start_condition, start=start_index, stop=stop_index) if start_indices.shape[0] != 0: # set start index if possible start_index = start_indices[0] start_index_known = True if stop_event_number is not None: stop_condition = 'event_number==' + str(stop_event_number) stop_indices = table.get_where_list(stop_condition, start=start_index, stop=stop_index) if stop_indices.shape[0] != 0: # set the stop index if possible, stop index is excluded stop_index = stop_indices[0] stop_index_known = True if start_index_known and stop_index_known and start_index + chunk_size >= stop_index: # special case, one read is enough, data not bigger than one chunk and the indices are known yield table.read(start=start_index, stop=stop_index), stop_index else: # read data in chunks, chunks do not divide events, abort if stop_event_number is reached # search for begin current_start_index = start_index if start_event_number is not None: while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 last_event_in_chunk = array_chunk["event_number"][-1] if last_event_in_chunk < start_event_number: current_start_index = current_start_index + chunk_size # not there yet, continue to next read (assuming sorted events) else: first_event_in_chunk = array_chunk["event_number"][0] # if stop_event_number is not None and first_event_in_chunk >= stop_event_number and start_index != 0 and start_index == current_start_index: # raise InvalidInputError('The stop event %d is missing. Change stop_event_number.' % stop_event_number) if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small. Increase chunk size to fit full event.') if not first_event_aligned and first_event_in_chunk == start_event_number and start_index != 0 and start_index == current_start_index: # first event in first chunk not aligned at index 0, so take next event if fail_on_missing_events: raise InvalidInputError('The start event %d is missing. Change start_event_number.' 
% start_event_number) chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number + 1, side='left') elif fail_on_missing_events and first_event_in_chunk > start_event_number and start_index == current_start_index: raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) elif first_event_aligned and first_event_in_chunk == start_event_number and start_index == current_start_index: chunk_start_index = 0 else: chunk_start_index = np.searchsorted(array_chunk["event_number"], start_event_number, side='left') if fail_on_missing_events and array_chunk["event_number"][chunk_start_index] != start_event_number and start_index == current_start_index: raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) # if fail_on_missing_events and ((start_index == current_start_index and chunk_start_index == 0 and start_index != 0 and not first_event_aligned) or array_chunk["event_number"][chunk_start_index] != start_event_number): # raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number) current_start_index = current_start_index + chunk_start_index # calculate index for next loop break elif not first_event_aligned and start_index != 0: while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 first_event_in_chunk = array_chunk["event_number"][0] last_event_in_chunk = array_chunk["event_number"][-1] if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small. Increase chunk size to fit full event.') chunk_start_index = np.searchsorted(array_chunk["event_number"], first_event_in_chunk + 1, side='left') current_start_index = current_start_index + chunk_start_index if not first_event_in_chunk == last_event_in_chunk: break # data loop while current_start_index < stop_index: current_stop_index = min(current_start_index + chunk_size, stop_index) array_chunk = table.read(start=current_start_index, stop=current_stop_index) # stop index is exclusive, so add 1 first_event_in_chunk = array_chunk["event_number"][0] last_event_in_chunk = array_chunk["event_number"][-1] chunk_start_index = 0 if stop_event_number is None: if current_stop_index == table_max_rows: chunk_stop_index = array_chunk.shape[0] else: chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left') else: if last_event_in_chunk >= stop_event_number: chunk_stop_index = np.searchsorted(array_chunk["event_number"], stop_event_number, side='left') elif current_stop_index == table_max_rows: # this will also add the last event of the table chunk_stop_index = array_chunk.shape[0] else: chunk_stop_index = np.searchsorted(array_chunk["event_number"], last_event_in_chunk, side='left') nrows = chunk_stop_index - chunk_start_index if nrows == 0: if array_chunk.shape[0] == chunk_size and first_event_in_chunk == last_event_in_chunk: raise InvalidInputError('Chunk size too small to fit event. Data corruption possible. 
Increase chunk size to read full event.') elif chunk_start_index == 0: # not increasing current_start_index return elif stop_event_number is not None and last_event_in_chunk >= stop_event_number: return else: yield array_chunk[chunk_start_index:chunk_stop_index], current_start_index + nrows + chunk_start_index current_start_index = current_start_index + nrows + chunk_start_index
[ "def", "data_aligned_at_events", "(", "table", ",", "start_event_number", "=", "None", ",", "stop_event_number", "=", "None", ",", "start_index", "=", "None", ",", "stop_index", "=", "None", ",", "chunk_size", "=", "10000000", ",", "try_speedup", "=", "False", ",", "first_event_aligned", "=", "True", ",", "fail_on_missing_events", "=", "True", ")", ":", "# initialize variables", "start_index_known", "=", "False", "stop_index_known", "=", "False", "start_index", "=", "0", "if", "start_index", "is", "None", "else", "start_index", "stop_index", "=", "table", ".", "nrows", "if", "stop_index", "is", "None", "else", "stop_index", "if", "stop_index", "<", "start_index", ":", "raise", "InvalidInputError", "(", "'Invalid start/stop index'", ")", "table_max_rows", "=", "table", ".", "nrows", "if", "stop_event_number", "is", "not", "None", "and", "start_event_number", "is", "not", "None", "and", "stop_event_number", "<", "start_event_number", ":", "raise", "InvalidInputError", "(", "'Invalid start/stop event number'", ")", "# set start stop indices from the event numbers for fast read if possible; not possible if the given event number does not exist in the data stream", "if", "try_speedup", "and", "table", ".", "colindexed", "[", "\"event_number\"", "]", ":", "if", "start_event_number", "is", "not", "None", ":", "start_condition", "=", "'event_number=='", "+", "str", "(", "start_event_number", ")", "start_indices", "=", "table", ".", "get_where_list", "(", "start_condition", ",", "start", "=", "start_index", ",", "stop", "=", "stop_index", ")", "if", "start_indices", ".", "shape", "[", "0", "]", "!=", "0", ":", "# set start index if possible", "start_index", "=", "start_indices", "[", "0", "]", "start_index_known", "=", "True", "if", "stop_event_number", "is", "not", "None", ":", "stop_condition", "=", "'event_number=='", "+", "str", "(", "stop_event_number", ")", "stop_indices", "=", "table", ".", "get_where_list", "(", "stop_condition", ",", "start", "=", "start_index", ",", "stop", "=", "stop_index", ")", "if", "stop_indices", ".", "shape", "[", "0", "]", "!=", "0", ":", "# set the stop index if possible, stop index is excluded", "stop_index", "=", "stop_indices", "[", "0", "]", "stop_index_known", "=", "True", "if", "start_index_known", "and", "stop_index_known", "and", "start_index", "+", "chunk_size", ">=", "stop_index", ":", "# special case, one read is enough, data not bigger than one chunk and the indices are known", "yield", "table", ".", "read", "(", "start", "=", "start_index", ",", "stop", "=", "stop_index", ")", ",", "stop_index", "else", ":", "# read data in chunks, chunks do not divide events, abort if stop_event_number is reached", "# search for begin", "current_start_index", "=", "start_index", "if", "start_event_number", "is", "not", "None", ":", "while", "current_start_index", "<", "stop_index", ":", "current_stop_index", "=", "min", "(", "current_start_index", "+", "chunk_size", ",", "stop_index", ")", "array_chunk", "=", "table", ".", "read", "(", "start", "=", "current_start_index", ",", "stop", "=", "current_stop_index", ")", "# stop index is exclusive, so add 1", "last_event_in_chunk", "=", "array_chunk", "[", "\"event_number\"", "]", "[", "-", "1", "]", "if", "last_event_in_chunk", "<", "start_event_number", ":", "current_start_index", "=", "current_start_index", "+", "chunk_size", "# not there yet, continue to next read (assuming sorted events)", "else", ":", "first_event_in_chunk", "=", "array_chunk", "[", "\"event_number\"", "]", "[", "0", "]", "# if stop_event_number 
is not None and first_event_in_chunk >= stop_event_number and start_index != 0 and start_index == current_start_index:", "# raise InvalidInputError('The stop event %d is missing. Change stop_event_number.' % stop_event_number)", "if", "array_chunk", ".", "shape", "[", "0", "]", "==", "chunk_size", "and", "first_event_in_chunk", "==", "last_event_in_chunk", ":", "raise", "InvalidInputError", "(", "'Chunk size too small. Increase chunk size to fit full event.'", ")", "if", "not", "first_event_aligned", "and", "first_event_in_chunk", "==", "start_event_number", "and", "start_index", "!=", "0", "and", "start_index", "==", "current_start_index", ":", "# first event in first chunk not aligned at index 0, so take next event", "if", "fail_on_missing_events", ":", "raise", "InvalidInputError", "(", "'The start event %d is missing. Change start_event_number.'", "%", "start_event_number", ")", "chunk_start_index", "=", "np", ".", "searchsorted", "(", "array_chunk", "[", "\"event_number\"", "]", ",", "start_event_number", "+", "1", ",", "side", "=", "'left'", ")", "elif", "fail_on_missing_events", "and", "first_event_in_chunk", ">", "start_event_number", "and", "start_index", "==", "current_start_index", ":", "raise", "InvalidInputError", "(", "'The start event %d is missing. Change start_event_number.'", "%", "start_event_number", ")", "elif", "first_event_aligned", "and", "first_event_in_chunk", "==", "start_event_number", "and", "start_index", "==", "current_start_index", ":", "chunk_start_index", "=", "0", "else", ":", "chunk_start_index", "=", "np", ".", "searchsorted", "(", "array_chunk", "[", "\"event_number\"", "]", ",", "start_event_number", ",", "side", "=", "'left'", ")", "if", "fail_on_missing_events", "and", "array_chunk", "[", "\"event_number\"", "]", "[", "chunk_start_index", "]", "!=", "start_event_number", "and", "start_index", "==", "current_start_index", ":", "raise", "InvalidInputError", "(", "'The start event %d is missing. Change start_event_number.'", "%", "start_event_number", ")", "# if fail_on_missing_events and ((start_index == current_start_index and chunk_start_index == 0 and start_index != 0 and not first_event_aligned) or array_chunk[\"event_number\"][chunk_start_index] != start_event_number):", "# raise InvalidInputError('The start event %d is missing. Change start_event_number.' % start_event_number)", "current_start_index", "=", "current_start_index", "+", "chunk_start_index", "# calculate index for next loop", "break", "elif", "not", "first_event_aligned", "and", "start_index", "!=", "0", ":", "while", "current_start_index", "<", "stop_index", ":", "current_stop_index", "=", "min", "(", "current_start_index", "+", "chunk_size", ",", "stop_index", ")", "array_chunk", "=", "table", ".", "read", "(", "start", "=", "current_start_index", ",", "stop", "=", "current_stop_index", ")", "# stop index is exclusive, so add 1", "first_event_in_chunk", "=", "array_chunk", "[", "\"event_number\"", "]", "[", "0", "]", "last_event_in_chunk", "=", "array_chunk", "[", "\"event_number\"", "]", "[", "-", "1", "]", "if", "array_chunk", ".", "shape", "[", "0", "]", "==", "chunk_size", "and", "first_event_in_chunk", "==", "last_event_in_chunk", ":", "raise", "InvalidInputError", "(", "'Chunk size too small. 
Increase chunk size to fit full event.'", ")", "chunk_start_index", "=", "np", ".", "searchsorted", "(", "array_chunk", "[", "\"event_number\"", "]", ",", "first_event_in_chunk", "+", "1", ",", "side", "=", "'left'", ")", "current_start_index", "=", "current_start_index", "+", "chunk_start_index", "if", "not", "first_event_in_chunk", "==", "last_event_in_chunk", ":", "break", "# data loop", "while", "current_start_index", "<", "stop_index", ":", "current_stop_index", "=", "min", "(", "current_start_index", "+", "chunk_size", ",", "stop_index", ")", "array_chunk", "=", "table", ".", "read", "(", "start", "=", "current_start_index", ",", "stop", "=", "current_stop_index", ")", "# stop index is exclusive, so add 1", "first_event_in_chunk", "=", "array_chunk", "[", "\"event_number\"", "]", "[", "0", "]", "last_event_in_chunk", "=", "array_chunk", "[", "\"event_number\"", "]", "[", "-", "1", "]", "chunk_start_index", "=", "0", "if", "stop_event_number", "is", "None", ":", "if", "current_stop_index", "==", "table_max_rows", ":", "chunk_stop_index", "=", "array_chunk", ".", "shape", "[", "0", "]", "else", ":", "chunk_stop_index", "=", "np", ".", "searchsorted", "(", "array_chunk", "[", "\"event_number\"", "]", ",", "last_event_in_chunk", ",", "side", "=", "'left'", ")", "else", ":", "if", "last_event_in_chunk", ">=", "stop_event_number", ":", "chunk_stop_index", "=", "np", ".", "searchsorted", "(", "array_chunk", "[", "\"event_number\"", "]", ",", "stop_event_number", ",", "side", "=", "'left'", ")", "elif", "current_stop_index", "==", "table_max_rows", ":", "# this will also add the last event of the table", "chunk_stop_index", "=", "array_chunk", ".", "shape", "[", "0", "]", "else", ":", "chunk_stop_index", "=", "np", ".", "searchsorted", "(", "array_chunk", "[", "\"event_number\"", "]", ",", "last_event_in_chunk", ",", "side", "=", "'left'", ")", "nrows", "=", "chunk_stop_index", "-", "chunk_start_index", "if", "nrows", "==", "0", ":", "if", "array_chunk", ".", "shape", "[", "0", "]", "==", "chunk_size", "and", "first_event_in_chunk", "==", "last_event_in_chunk", ":", "raise", "InvalidInputError", "(", "'Chunk size too small to fit event. Data corruption possible. Increase chunk size to read full event.'", ")", "elif", "chunk_start_index", "==", "0", ":", "# not increasing current_start_index", "return", "elif", "stop_event_number", "is", "not", "None", "and", "last_event_in_chunk", ">=", "stop_event_number", ":", "return", "else", ":", "yield", "array_chunk", "[", "chunk_start_index", ":", "chunk_stop_index", "]", ",", "current_start_index", "+", "nrows", "+", "chunk_start_index", "current_start_index", "=", "current_start_index", "+", "nrows", "+", "chunk_start_index" ]
Takes the table with an event_number column and returns chunks with the size up to chunk_size. The chunks are chosen in a way that the events are not split. Additional parameters can be set to increase the readout speed. Events between a certain range can be selected. Also the start and the stop indices limiting the table size can be specified to improve performance. The event_number column must be sorted. In case try_speedup is True, it is important to create an index of event_number column with pytables before using this function. Otherwise the queries are slowed down.

Parameters
----------
table : pytables.table
    The data.
start_event_number : int
    The returned data contains events with event number >= start_event_number. If None, no limit is set.
stop_event_number : int
    The returned data contains events with event number < stop_event_number. If None, no limit is set.
start_index : int
    Start index of data. If None, no limit is set.
stop_index : int
    Stop index of data. If None, no limit is set.
chunk_size : int
    Maximum chunk size per read.
try_speedup : bool
    If True, try to reduce the index range to read by searching for the indices of start and stop event number. If these event numbers are usually not in the data this speedup can even slow down the function! The following parameters are not used when try_speedup is True:
first_event_aligned : bool
    If True, assuming that the first event is aligned to the data chunk and will be added. If False, the lowest event number of the first chunk will not be read out.
fail_on_missing_events : bool
    If True, an error is given when start_event_number or stop_event_number is not part of the data.

Returns
-------
Iterator of tuples
    Data of the actual data chunk and start index for the next chunk.

Example
-------
start_index = 0
for scan_parameter in scan_parameter_range:
    start_event_number, stop_event_number = event_select_function(scan_parameter)
    for data, start_index in data_aligned_at_events(table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=start_index):
        do_something(data)

for data, index in data_aligned_at_events(table):
    do_something(data)
[ "Takes", "the", "table", "with", "a", "event_number", "column", "and", "returns", "chunks", "with", "the", "size", "up", "to", "chunk_size", ".", "The", "chunks", "are", "chosen", "in", "a", "way", "that", "the", "events", "are", "not", "splitted", ".", "Additional", "parameters", "can", "be", "set", "to", "increase", "the", "readout", "speed", ".", "Events", "between", "a", "certain", "range", "can", "be", "selected", ".", "Also", "the", "start", "and", "the", "stop", "indices", "limiting", "the", "table", "size", "can", "be", "specified", "to", "improve", "performance", ".", "The", "event_number", "column", "must", "be", "sorted", ".", "In", "case", "of", "try_speedup", "is", "True", "it", "is", "important", "to", "create", "an", "index", "of", "event_number", "column", "with", "pytables", "before", "using", "this", "function", ".", "Otherwise", "the", "queries", "are", "slowed", "down", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1160-L1320
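A minimal usage sketch for data_aligned_at_events (illustrative only): the HDF5 file name 'hits.h5', the node name /Hits, and the chunk size are assumptions and are not taken from the pyBAR repository; only the import path and the keyword arguments follow the record above.

import tables  # pytables
from pybar.analysis.analysis_utils import data_aligned_at_events

# Open an HDF5 file that contains a hit table with a sorted event_number column.
# 'hits.h5' and the node name /Hits are placeholder assumptions.
with tables.open_file('hits.h5', mode='r') as in_file:
    hits_table = in_file.root.Hits
    # Optional speedup: if try_speedup=True is going to be used, an index on the
    # event_number column can be created once (with the file opened in write mode)
    # via hits_table.cols.event_number.create_csindex().
    n_hits = 0
    for hits_chunk, next_start_index in data_aligned_at_events(hits_table, chunk_size=1000000):
        # Each chunk is a numpy structured array; events are never split across chunks.
        n_hits += hits_chunk.shape[0]
    print('Read %d hits in event-aligned chunks' % n_hits)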
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
select_good_pixel_region
def select_good_pixel_region(hits, col_span, row_span, min_cut_threshold=0.2, max_cut_threshold=2.0):
    '''Takes the hit array and masks all pixels with a certain occupancy.

    Parameters
    ----------
    hits : array like
        If dim > 2 the additional dimensions are summed up.
    min_cut_threshold : float
        A number to specify the minimum threshold, which pixels to take.
        Pixels are masked if occupancy < min_cut_threshold * np.ma.median(occupancy)
        0 means that no pixels are masked
    max_cut_threshold : float
        A number to specify the maximum threshold, which pixels to take.
        Pixels are masked if occupancy > max_cut_threshold * np.ma.median(occupancy)
        Can be set to None so that no pixels are masked by max_cut_threshold

    Returns
    -------
    numpy.ma.array, shape=(80,336)
        The hits array with masked pixels.
    '''
    hits = np.sum(hits, axis=(-1)).astype('u8')
    mask = np.ones(shape=(80, 336), dtype=np.uint8)
    mask[min(col_span):max(col_span) + 1, min(row_span):max(row_span) + 1] = 0
    ma = np.ma.masked_where(mask, hits)
    if max_cut_threshold is not None:
        return np.ma.masked_where(np.logical_or(ma < min_cut_threshold * np.ma.median(ma), ma > max_cut_threshold * np.ma.median(ma)), ma)
    else:
        return np.ma.masked_where(ma < min_cut_threshold * np.ma.median(ma), ma)
python
def select_good_pixel_region(hits, col_span, row_span, min_cut_threshold=0.2, max_cut_threshold=2.0):
    '''Takes the hit array and masks all pixels with a certain occupancy.

    Parameters
    ----------
    hits : array like
        If dim > 2 the additional dimensions are summed up.
    min_cut_threshold : float
        A number to specify the minimum threshold, which pixels to take.
        Pixels are masked if occupancy < min_cut_threshold * np.ma.median(occupancy)
        0 means that no pixels are masked
    max_cut_threshold : float
        A number to specify the maximum threshold, which pixels to take.
        Pixels are masked if occupancy > max_cut_threshold * np.ma.median(occupancy)
        Can be set to None so that no pixels are masked by max_cut_threshold

    Returns
    -------
    numpy.ma.array, shape=(80,336)
        The hits array with masked pixels.
    '''
    hits = np.sum(hits, axis=(-1)).astype('u8')
    mask = np.ones(shape=(80, 336), dtype=np.uint8)
    mask[min(col_span):max(col_span) + 1, min(row_span):max(row_span) + 1] = 0
    ma = np.ma.masked_where(mask, hits)
    if max_cut_threshold is not None:
        return np.ma.masked_where(np.logical_or(ma < min_cut_threshold * np.ma.median(ma), ma > max_cut_threshold * np.ma.median(ma)), ma)
    else:
        return np.ma.masked_where(ma < min_cut_threshold * np.ma.median(ma), ma)
[ "def", "select_good_pixel_region", "(", "hits", ",", "col_span", ",", "row_span", ",", "min_cut_threshold", "=", "0.2", ",", "max_cut_threshold", "=", "2.0", ")", ":", "hits", "=", "np", ".", "sum", "(", "hits", ",", "axis", "=", "(", "-", "1", ")", ")", ".", "astype", "(", "'u8'", ")", "mask", "=", "np", ".", "ones", "(", "shape", "=", "(", "80", ",", "336", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "mask", "[", "min", "(", "col_span", ")", ":", "max", "(", "col_span", ")", "+", "1", ",", "min", "(", "row_span", ")", ":", "max", "(", "row_span", ")", "+", "1", "]", "=", "0", "ma", "=", "np", ".", "ma", ".", "masked_where", "(", "mask", ",", "hits", ")", "if", "max_cut_threshold", "is", "not", "None", ":", "return", "np", ".", "ma", ".", "masked_where", "(", "np", ".", "logical_or", "(", "ma", "<", "min_cut_threshold", "*", "np", ".", "ma", ".", "median", "(", "ma", ")", ",", "ma", ">", "max_cut_threshold", "*", "np", ".", "ma", ".", "median", "(", "ma", ")", ")", ",", "ma", ")", "else", ":", "return", "np", ".", "ma", ".", "masked_where", "(", "ma", "<", "min_cut_threshold", "*", "np", ".", "ma", ".", "median", "(", "ma", ")", ",", "ma", ")" ]
Takes the hit array and masks all pixels with a certain occupancy.

Parameters
----------
hits : array like
    If dim > 2 the additional dimensions are summed up.
min_cut_threshold : float
    A number to specify the minimum threshold, which pixels to take.
    Pixels are masked if occupancy < min_cut_threshold * np.ma.median(occupancy)
    0 means that no pixels are masked
max_cut_threshold : float
    A number to specify the maximum threshold, which pixels to take.
    Pixels are masked if occupancy > max_cut_threshold * np.ma.median(occupancy)
    Can be set to None so that no pixels are masked by max_cut_threshold

Returns
-------
numpy.ma.array, shape=(80,336)
    The hits array with masked pixels.
[ "Takes", "the", "hit", "array", "and", "masks", "all", "pixels", "with", "a", "certain", "occupancy", "." ]
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1323-L1353
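A short, self-contained sketch of how select_good_pixel_region might be applied to an occupancy histogram (illustrative only): the fake Poisson data, the marked hot/dead pixels and the chosen spans are assumptions, while the (80, 336) geometry, the import path and the keyword arguments follow the record above.

import numpy as np
from pybar.analysis.analysis_utils import select_good_pixel_region

# Fake occupancy histogram with FE-I4 geometry (80 columns x 336 rows) plus a
# trailing axis that the function sums up; the values are placeholder hit counts.
occupancy = np.random.poisson(lam=100, size=(80, 336, 1))
occupancy[5, 10, 0] = 10000   # a "hot" pixel, well above 2 * median occupancy
occupancy[40, 200, 0] = 0     # a "dead" pixel, below 0.2 * median occupancy

masked_occupancy = select_good_pixel_region(occupancy,
                                            col_span=(2, 77),
                                            row_span=(2, 334),
                                            min_cut_threshold=0.2,
                                            max_cut_threshold=2.0)
# Pixels outside the column/row span and pixels failing the occupancy cuts are masked.
print('%d of %d pixels masked' % (np.ma.count_masked(masked_occupancy), masked_occupancy.size))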