def require_authentication(self, realm, environ):
"""Return True if this realm requires authentication (grant anonymous access otherwise)."""
realm_entry = self._get_realm_entry(realm)
if realm_entry is None:
_logger.error(
'Missing configuration simple_dc.user_mapping["{}"] (or "*"): '
"realm is not accessible!".format(realm)
)
return realm_entry is not True |
def basic_auth_user(self, realm, user_name, password, environ):
"""Returns True if this user_name/password pair is valid for the realm,
False otherwise. Used for basic authentication."""
user = self._get_realm_entry(realm, user_name)
if user is not None and password == user.get("password"):
environ["wsgidav.auth.roles"] = user.get("roles", [])
return True
return False |
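For context, a hedged sketch of the `simple_dc.user_mapping` configuration that `_get_realm_entry()` resolves against; the realm, user name, and password shown are placeholders:
config = {
    "simple_dc": {
        "user_mapping": {
            "*": True,  # True grants anonymous access (require_authentication() returns False)
            "/share": {
                "alice": {"password": "secret", "roles": ["editor"]},
            },
        },
    },
}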
def digest_auth_user(self, realm, user_name, environ):
"""Computes digest hash A1 part."""
user = self._get_realm_entry(realm, user_name)
if user is None:
return False
password = user.get("password")
environ["wsgidav.auth.roles"] = user.get("roles", [])
return self._compute_http_digest_a1(realm, user_name, password) |
def get(self, token):
"""Return a lock dictionary for a token.
If the lock does not exist or is expired, None is returned.
token:
lock token
Returns:
Lock dictionary or <None>
Side effect: if lock is expired, it will be purged and None is returned.
"""
self._lock.acquire_read()
try:
lock = self._dict.get(token)
if lock is None:
# Lock not found: purge dangling URL2TOKEN entries
_logger.debug("Lock purged dangling: {}".format(token))
self.delete(token)
return None
expire = float(lock["expire"])
if expire >= 0 and expire < time.time():
_logger.debug(
"Lock timed-out({}): {}".format(expire, lock_string(lock))
)
self.delete(token)
return None
return lock
finally:
self._lock.release() |
def create(self, path, lock):
"""Create a direct lock for a resource path.
path:
Normalized path (utf8 encoded string, no trailing '/')
lock:
lock dictionary, without a token entry
Returns:
The modified lock dictionary (a unique 'token' entry has been added).
**Note:** the lock dictionary may be modified on return:
- lock['root'] is ignored and set to the normalized <path>
- lock['timeout'] may be normalized and shorter than requested
- lock['token'] is added
"""
self._lock.acquire_write()
try:
# We expect only a lock definition, not an existing lock
assert lock.get("token") is None
assert lock.get("expire") is None, "Use timeout instead of expire"
assert path and "/" in path
# Normalize root: /foo/bar
org_path = path
path = normalize_lock_root(path)
lock["root"] = path
# Normalize timeout from ttl to expire-date
# (Check for None before calling float(): float(None) would raise TypeError.)
timeout = lock.get("timeout")
if timeout is None:
timeout = LockStorageDict.LOCK_TIME_OUT_DEFAULT
else:
timeout = float(timeout)
if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
timeout = LockStorageDict.LOCK_TIME_OUT_MAX
lock["timeout"] = timeout
lock["expire"] = time.time() + timeout
validate_lock(lock)
token = generate_lock_token()
lock["token"] = token
# Store lock
self._dict[token] = lock
# Store locked path reference
key = "URL2TOKEN:{}".format(path)
if key not in self._dict:
self._dict[key] = [token]
else:
# Note: Shelve dictionary returns copies, so we must reassign
# values:
tokList = self._dict[key]
tokList.append(token)
self._dict[key] = tokList
self._flush()
_logger.debug(
"LockStorageDict.set({!r}): {}".format(org_path, lock_string(lock))
)
return lock
finally:
self._lock.release() |
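A hedged usage sketch for create(); `storage` is assumed to be a LockStorageDict instance, and the lock definition intentionally carries no 'token' or 'expire' entry:
lock = storage.create(
    "/dav/folder/file.txt",
    {
        "type": "write",
        "scope": "exclusive",
        "depth": "0",
        "owner": "alice's client",
        "timeout": 600,  # seconds; clamped to LOCK_TIME_OUT_MAX
        "principal": "alice",
    },
)
print(lock["token"], lock["expire"])  # both were added by create()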
def refresh(self, token, timeout):
"""Modify an existing lock's timeout.
token:
Valid lock token.
timeout:
Suggested lifetime in seconds (-1 for infinite).
The real expiration time may be shorter than requested!
Returns:
Lock dictionary.
Raises ValueError, if token is invalid.
"""
assert token in self._dict, "Lock must exist"
assert timeout == -1 or timeout > 0
if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
timeout = LockStorageDict.LOCK_TIME_OUT_MAX
self._lock.acquire_write()
try:
# Note: shelve dictionary returns copies, so we must reassign
# values:
lock = self._dict[token]
lock["timeout"] = timeout
lock["expire"] = time.time() + timeout
self._dict[token] = lock
self._flush()
finally:
self._lock.release()
return lock |
def delete(self, token):
"""Delete lock.
Returns True on success. False, if token does not exist, or is expired.
"""
self._lock.acquire_write()
try:
lock = self._dict.get(token)
_logger.debug("delete {}".format(lock_string(lock)))
if lock is None:
return False
# Remove url to lock mapping
key = "URL2TOKEN:{}".format(lock.get("root"))
if key in self._dict:
# _logger.debug(" delete token {} from url {}".format(token, lock.get("root")))
tokList = self._dict[key]
if len(tokList) > 1:
# Note: shelve dictionary returns copies, so we must
# reassign values:
tokList.remove(token)
self._dict[key] = tokList
else:
del self._dict[key]
# Remove the lock
del self._dict[token]
self._flush()
finally:
self._lock.release()
return True |
def get_lock_list(self, path, include_root, include_children, token_only):
"""Return a list of direct locks for <path>.
Expired locks are *not* returned (but may be purged).
path:
Normalized path (utf8 encoded string, no trailing '/')
include_root:
False: don't add <path> lock (only makes sense, when include_children
is True).
include_children:
True: Also check all sub-paths for existing locks.
token_only:
True: only a list of token is returned. This may be implemented
more efficiently by some providers.
Returns:
List of valid lock dictionaries (may be empty).
"""
assert compat.is_native(path)
assert path and path.startswith("/")
assert include_root or include_children
def __appendLocks(toklist):
# Since we can do this quickly, we use self.get() even if
# token_only is set, so expired locks are purged.
for token in toklist:
lock = self.get(token)
if lock:
if token_only:
lockList.append(lock["token"])
else:
lockList.append(lock)
path = normalize_lock_root(path)
self._lock.acquire_read()
try:
key = "URL2TOKEN:{}".format(path)
tokList = self._dict.get(key, [])
lockList = []
if include_root:
__appendLocks(tokList)
if include_children:
for u, ltoks in self._dict.items():
if util.is_child_uri(key, u):
__appendLocks(ltoks)
return lockList
finally:
self._lock.release() |
def _flush(self):
"""Write persistent dictionary to disc."""
_logger.debug("_flush()")
self._lock.acquire_write() # TODO: read access is enough?
try:
self._dict.sync()
finally:
self._lock.release() |
def clear(self):
"""Delete all entries."""
self._lock.acquire_write() # TODO: read access is enough?
try:
was_closed = self._dict is None
if was_closed:
self.open()
if len(self._dict):
self._dict.clear()
self._dict.sync()
if was_closed:
self.close()
finally:
self._lock.release() |
def _fail(self, value, context_info=None, src_exception=None, err_condition=None):
"""Wrapper to raise (and log) DAVError."""
e = DAVError(value, context_info, src_exception, err_condition)
if self.verbose >= 4:
_logger.warning(
"Raising DAVError {}".format(
safe_re_encode(e.get_user_info(), sys.stdout.encoding)
)
)
raise e |
def begin_write(self, content_type=None):
"""Open content as a stream for writing.
See DAVResource.begin_write()
"""
assert not self.is_collection
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
# _logger.debug("begin_write: {}, {}".format(self._file_path, "wb"))
# GC issue 57: always store as binary
return open(self._file_path, "wb", BUFFER_SIZE) |
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
os.unlink(self._file_path)
self.remove_all_properties(True)
self.remove_all_locks(True) |
def copy_move_single(self, dest_path, is_move):
"""See DAVResource.copy_move_single() """
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
fpDest = self.provider._loc_to_file_path(dest_path, self.environ)
assert not util.is_equal_or_child_uri(self.path, dest_path)
# Copy file (overwrite, if exists)
shutil.copy2(self._file_path, fpDest)
# (Live properties are copied by copy2 or copystat)
# Copy dead properties
propMan = self.provider.prop_manager
if propMan:
destRes = self.provider.get_resource_inst(dest_path, self.environ)
if is_move:
propMan.move_properties(
self.get_ref_url(),
destRes.get_ref_url(),
with_children=False,
environ=self.environ,
)
else:
propMan.copy_properties(
self.get_ref_url(), destRes.get_ref_url(), self.environ
) |
def move_recursive(self, dest_path):
"""See DAVResource.move_recursive() """
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
fpDest = self.provider._loc_to_file_path(dest_path, self.environ)
assert not util.is_equal_or_child_uri(self.path, dest_path)
assert not os.path.exists(fpDest)
_logger.debug("move_recursive({}, {})".format(self._file_path, fpDest))
shutil.move(self._file_path, fpDest)
# (Live properties are copied by copy2 or copystat)
# Move dead properties
if self.provider.prop_manager:
destRes = self.provider.get_resource_inst(dest_path, self.environ)
self.provider.prop_manager.move_properties(
self.get_ref_url(),
destRes.get_ref_url(),
with_children=True,
environ=self.environ,
) |
def set_last_modified(self, dest_path, time_stamp, dry_run):
"""Set last modified time for destPath to timeStamp on epoch-format"""
# Translate time from RFC 1123 to seconds since epoch format
secs = util.parse_time_string(time_stamp)
if not dry_run:
os.utime(self._file_path, (secs, secs))
return True |
def get_member_names(self):
"""Return list of direct collection member names (utf-8 encoded).
See DAVCollection.get_member_names()
"""
# On Windows NT/2k/XP and Unix, if path is a Unicode object, the result
# will be a list of Unicode objects.
# Undecodable filenames will still be returned as string objects
# If we don't request unicode, for example Vista may return a '?'
# instead of a special character. The name would then be unusable to
# build a distinct URL that references this resource.
nameList = []
# self._file_path is unicode, so os.listdir returns unicode as well
assert compat.is_unicode(self._file_path)
for name in os.listdir(self._file_path):
if not compat.is_unicode(name):
name = name.decode(sys.getfilesystemencoding())
assert compat.is_unicode(name)
# Skip non files (links and mount points)
fp = os.path.join(self._file_path, name)
if not os.path.isdir(fp) and not os.path.isfile(fp):
_logger.debug("Skipping non-file {!r}".format(fp))
continue
# name = name.encode("utf8")
name = compat.to_native(name)
nameList.append(name)
return nameList |
def get_member(self, name):
"""Return direct collection member (DAVResource or derived).
See DAVCollection.get_member()
"""
assert compat.is_native(name), "{!r}".format(name)
fp = os.path.join(self._file_path, compat.to_unicode(name))
# name = name.encode("utf8")
path = util.join_uri(self.path, name)
if os.path.isdir(fp):
res = FolderResource(path, self.environ, fp)
elif os.path.isfile(fp):
res = FileResource(path, self.environ, fp)
else:
_logger.debug("Skipping non-file {}".format(path))
res = None
return res |
def create_empty_resource(self, name):
"""Create an empty (length-0) resource.
See DAVResource.create_empty_resource()
"""
assert "/" not in name
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
path = util.join_uri(self.path, name)
fp = self.provider._loc_to_file_path(path, self.environ)
f = open(fp, "wb")
f.close()
return self.provider.get_resource_inst(path, self.environ) |
def create_collection(self, name):
"""Create a new collection as member of self.
See DAVResource.create_collection()
"""
assert "/" not in name
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
path = util.join_uri(self.path, name)
fp = self.provider._loc_to_file_path(path, self.environ)
os.mkdir(fp) |
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
shutil.rmtree(self._file_path, ignore_errors=False)
self.remove_all_properties(True)
self.remove_all_locks(True) |
def copy_move_single(self, dest_path, is_move):
"""See DAVResource.copy_move_single() """
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
fpDest = self.provider._loc_to_file_path(dest_path, self.environ)
assert not util.is_equal_or_child_uri(self.path, dest_path)
# Create destination collection, if not exists
if not os.path.exists(fpDest):
os.mkdir(fpDest)
try:
# may raise: [Error 5] Permission denied:
# u'C:\\temp\\litmus\\ccdest'
shutil.copystat(self._file_path, fpDest)
except Exception:
_logger.exception("Could not copy folder stats: {}".format(self._file_path))
# (Live properties are copied by copy2 or copystat)
# Copy dead properties
propMan = self.provider.prop_manager
if propMan:
destRes = self.provider.get_resource_inst(dest_path, self.environ)
if is_move:
propMan.move_properties(
self.get_ref_url(),
destRes.get_ref_url(),
with_children=False,
environ=self.environ,
)
else:
propMan.copy_properties(
self.get_ref_url(), destRes.get_ref_url(), self.environ
) |
def _loc_to_file_path(self, path, environ=None):
"""Convert resource path to a unicode absolute file path.
Optional environ argument may be useful e.g. in relation to per-user
sub-folder chrooting inside root_folder_path.
"""
root_path = self.root_folder_path
assert root_path is not None
assert compat.is_native(root_path)
assert compat.is_native(path)
path_parts = path.strip("/").split("/")
file_path = os.path.abspath(os.path.join(root_path, *path_parts))
# Guard against escaping the root. Note: a plain prefix check would also
# match sibling folders like root_path + "XXX", so compare with a separator:
if file_path != root_path and not file_path.startswith(root_path + os.sep):
raise RuntimeError(
"Security exception: tried to access file outside root: {}".format(
file_path
)
)
# Convert to unicode
file_path = util.to_unicode_safe(file_path)
return file_path |
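A standalone, runnable sketch of the same containment check (root path hypothetical), showing how '..' segments are collapsed by os.path.abspath before the check:
import os

def check_inside_root(root_path, path):
    # Mirrors _loc_to_file_path(): join, collapse "..", verify containment.
    parts = path.strip("/").split("/")
    file_path = os.path.abspath(os.path.join(root_path, *parts))
    if file_path != root_path and not file_path.startswith(root_path + os.sep):
        raise RuntimeError("Security exception: outside root: {}".format(file_path))
    return file_path

print(check_inside_root("/srv/dav", "/a/b.txt"))    # /srv/dav/a/b.txt
# check_inside_root("/srv/dav", "/../etc/passwd")   # raises RuntimeError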
def get_resource_inst(self, path, environ):
"""Return info dictionary for path.
See DAVProvider.get_resource_inst()
"""
self._count_get_resource_inst += 1
fp = self._loc_to_file_path(path, environ)
if not os.path.exists(fp):
return None
if os.path.isdir(fp):
return FolderResource(path, environ, fp)
return FileResource(path, environ, fp) |
def lock_string(lock_dict):
"""Return readable rep."""
if not lock_dict:
return "Lock: None"
if lock_dict["expire"] < 0:
expire = "Infinite ({})".format(lock_dict["expire"])
else:
expire = "{} (in {} seconds)".format(
util.get_log_time(lock_dict["expire"]), lock_dict["expire"] - time.time()
)
return "Lock(<{}..>, '{}', {}, {}, depth-{}, until {}".format(
# first 4 significant token characters
lock_dict.get("token", "?" * 30)[18:22],
lock_dict.get("root"),
lock_dict.get("principal"),
lock_dict.get("scope"),
lock_dict.get("depth"),
expire,
) |
def _generate_lock(
self, principal, lock_type, lock_scope, lock_depth, lock_owner, path, timeout
):
"""Acquire lock and return lock_dict.
principal
Name of the principal.
lock_type
Must be 'write'.
lock_scope
Must be 'shared' or 'exclusive'.
lock_depth
Must be '0' or 'infinity'.
lock_owner
String identifying the owner.
path
Resource URL.
timeout
Seconds to live
This function does NOT check, if the new lock creates a conflict!
"""
if timeout is None:
timeout = LockManager.LOCK_TIME_OUT_DEFAULT
elif timeout < 0:
timeout = -1
lock_dict = {
"root": path,
"type": lock_type,
"scope": lock_scope,
"depth": lock_depth,
"owner": lock_owner,
"timeout": timeout,
"principal": principal,
}
#
self.storage.create(path, lock_dict)
return lock_dict |
def acquire(
self,
url,
lock_type,
lock_scope,
lock_depth,
lock_owner,
timeout,
principal,
token_list,
):
"""Check for permissions and acquire a lock.
On success return new lock dictionary.
On error raise a DAVError with an embedded DAVErrorCondition.
"""
url = normalize_lock_root(url)
self._lock.acquire_write()
try:
# Raises DAVError on conflict:
self._check_lock_permission(
url, lock_type, lock_scope, lock_depth, token_list, principal
)
return self._generate_lock(
principal, lock_type, lock_scope, lock_depth, lock_owner, url, timeout
)
finally:
self._lock.release() |
def refresh(self, token, timeout=None):
"""Set new timeout for lock, if existing and valid."""
if timeout is None:
timeout = LockManager.LOCK_TIME_OUT_DEFAULT
return self.storage.refresh(token, timeout) |
def get_lock(self, token, key=None):
"""Return lock_dict, or None, if not found or invalid.
Side effect: if lock is expired, it will be purged and None is returned.
key:
name of lock attribute that will be returned instead of a dictionary.
"""
assert key in (
None,
"type",
"scope",
"depth",
"owner",
"root",
"timeout",
"principal",
"token",
)
lock = self.storage.get(token)
if key is None or lock is None:
return lock
return lock[key] |
def get_url_lock_list(self, url):
"""Return list of lock_dict, if <url> is protected by at least one direct, valid lock.
Side effect: expired locks for this url are purged.
"""
url = normalize_lock_root(url)
lockList = self.storage.get_lock_list(
url, include_root=True, include_children=False, token_only=False
)
return lockList |
def get_indirect_url_lock_list(self, url, principal=None):
"""Return a list of valid lockDicts, that protect <path> directly or indirectly.
If a principal is given, only locks owned by this principal are returned.
Side effect: expired locks for this path and all parents are purged.
"""
url = normalize_lock_root(url)
lockList = []
u = url
while u:
ll = self.storage.get_lock_list(
u, include_root=True, include_children=False, token_only=False
)
for l in ll:
if u != url and l["depth"] != "infinity":
continue # We only consider parents with Depth: infinity
# TODO: handle shared locks in some way?
# if (l["scope"] == "shared" and lock_scope == "shared"
# and principal != l["principal"]):
# continue # Only compatible with shared locks by other users
if principal is None or principal == l["principal"]:
lockList.append(l)
u = util.get_uri_parent(u)
return lockList |
def is_url_locked_by_token(self, url, lock_token):
"""Check, if url (or any of it's parents) is locked by lock_token."""
lockUrl = self.get_lock(lock_token, "root")
return lockUrl and util.is_equal_or_child_uri(lockUrl, url) |
def _check_lock_permission(
self, url, lock_type, lock_scope, lock_depth, token_list, principal
):
"""Check, if <principal> can lock <url>, otherwise raise an error.
If locking <url> would create a conflict, DAVError(HTTP_LOCKED) is
raised. An embedded DAVErrorCondition contains the conflicting resource.
@see http://www.webdav.org/specs/rfc4918.html#lock-model
- Parent locks WILL NOT be conflicting, if they are depth-0.
- Exclusive depth-infinity parent locks WILL be conflicting, even if
they are owned by <principal>.
- Child locks WILL NOT be conflicting, if we request a depth-0 lock.
- Exclusive child locks WILL be conflicting, even if they are owned by
<principal>. (7.7)
- It is not enough to check whether a lock is owned by <principal>, but
also the token must be passed with the request. (Because <principal>
may run two different applications on his client.)
- <principal> cannot lock-exclusive, if he holds a parent shared-lock.
(This would only make sense, if he was the only shared-lock holder.)
- TODO: litmus tries to acquire a shared lock on one resource twice
(locks: 27 'double_sharedlock') and fails, when we return HTTP_LOCKED.
So we allow multi shared locks on a resource even for the same
principal.
@param url: URL that shall be locked
@param lock_type: "write"
@param lock_scope: "shared"|"exclusive"
@param lock_depth: "0"|"infinity"
@param token_list: list of lock tokens, that the user submitted in If: header
@param principal: name of the principal requesting a lock
@return: None (or raise)
"""
assert lock_type == "write"
assert lock_scope in ("shared", "exclusive")
assert lock_depth in ("0", "infinity")
_logger.debug(
"checkLockPermission({}, {}, {}, {})".format(
url, lock_scope, lock_depth, principal
)
)
# Error precondition to collect conflicting URLs
errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)
self._lock.acquire_read()
try:
# Check url and all parents for conflicting locks
u = url
while u:
ll = self.get_url_lock_list(u)
for l in ll:
_logger.debug(" check parent {}, {}".format(u, lock_string(l)))
if u != url and l["depth"] != "infinity":
# We only consider parents with Depth: infinity
continue
elif l["scope"] == "shared" and lock_scope == "shared":
# Only compatible with shared locks (even by same
# principal)
continue
# Lock conflict
_logger.debug(
" -> DENIED due to locked parent {}".format(lock_string(l))
)
errcond.add_href(l["root"])
u = util.get_uri_parent(u)
if lock_depth == "infinity":
# Check child URLs for conflicting locks
childLocks = self.storage.get_lock_list(
url, include_root=False, include_children=True, token_only=False
)
for l in childLocks:
assert util.is_child_uri(url, l["root"])
# if util.is_child_uri(url, l["root"]):
_logger.debug(
" -> DENIED due to locked child {}".format(lock_string(l))
)
errcond.add_href(l["root"])
finally:
self._lock.release()
# If there were conflicts, raise HTTP_LOCKED for <url>, and pass
# conflicting resource with 'no-conflicting-lock' precondition
if len(errcond.hrefs) > 0:
raise DAVError(HTTP_LOCKED, err_condition=errcond)
return |
def _sync(self):
"""Write persistent dictionary to disc."""
_logger.debug("_sync()")
self._lock.acquire_write() # TODO: read access is enough?
try:
if self._loaded:
self._dict.sync()
finally:
self._lock.release() |
def acquire_read(self, timeout=None):
"""Acquire a read lock for the current thread, waiting at most
timeout seconds or doing a non-blocking check in case timeout is <= 0.
In case timeout is None, the call to acquire_read blocks until the
lock request can be serviced.
In case the timeout expires before the lock could be serviced, a
RuntimeError is thrown."""
if timeout is not None:
endtime = time() + timeout
me = currentThread()
self.__condition.acquire()
try:
if self.__writer is me:
# If we are the writer, grant a new read lock, always.
self.__writercount += 1
return
while True:
if self.__writer is None:
# Only test anything if there is no current writer.
if self.__upgradewritercount or self.__pendingwriters:
if me in self.__readers:
# Only grant a read lock if we already have one
# in case writers are waiting for their turn.
# This means that writers can't easily get starved
# (but see below, readers can).
self.__readers[me] += 1
return
# No, we aren't a reader (yet), wait for our turn.
else:
# Grant a new read lock, always, in case there are
# no pending writers (and no writer).
self.__readers[me] = self.__readers.get(me, 0) + 1
return
if timeout is not None:
remaining = endtime - time()
if remaining <= 0:
# Timeout has expired, signal caller of this.
raise RuntimeError("Acquiring read lock timed out")
self.__condition.wait(remaining)
else:
self.__condition.wait()
finally:
self.__condition.release() |
def acquire_write(self, timeout=None):
"""Acquire a write lock for the current thread, waiting at most
timeout seconds or doing a non-blocking check in case timeout is <= 0.
In case the write lock cannot be serviced due to the deadlock
condition mentioned above, a ValueError is raised.
In case timeout is None, the call to acquire_write blocks until the
lock request can be serviced.
In case the timeout expires before the lock could be serviced, a
RuntimeError is thrown."""
if timeout is not None:
endtime = time() + timeout
me, upgradewriter = currentThread(), False
self.__condition.acquire()
try:
if self.__writer is me:
# If we are the writer, grant a new write lock, always.
self.__writercount += 1
return
elif me in self.__readers:
# If we are a reader, no need to add us to pendingwriters,
# we get the upgradewriter slot.
if self.__upgradewritercount:
# If we are a reader and want to upgrade, and someone
# else also wants to upgrade, there is no way we can do
# this except if one of us releases all his read locks.
# Signal this to user.
raise ValueError("Inevitable dead lock, denying write lock")
upgradewriter = True
self.__upgradewritercount = self.__readers.pop(me)
else:
# We aren't a reader, so add us to the pending writers queue
# for synchronization with the readers.
self.__pendingwriters.append(me)
while True:
if not self.__readers and self.__writer is None:
# Only test anything if there are no readers and writers.
if self.__upgradewritercount:
if upgradewriter:
# There is a writer to upgrade, and it's us. Take
# the write lock.
self.__writer = me
self.__writercount = self.__upgradewritercount + 1
self.__upgradewritercount = 0
return
# There is a writer to upgrade, but it's not us.
# Always leave the upgrade writer the advance slot,
# because he presumes he'll get a write lock directly
# from a previously held read lock.
elif self.__pendingwriters[0] is me:
# If there are no readers and writers, it's always
# fine for us to take the writer slot, removing us
# from the pending writers queue.
# This might mean starvation for readers, though.
self.__writer = me
self.__writercount = 1
self.__pendingwriters = self.__pendingwriters[1:]
return
if timeout is not None:
remaining = endtime - time()
if remaining <= 0:
# Timeout has expired, signal caller of this.
if upgradewriter:
# Put us back on the reader queue. No need to
# signal anyone of this change, because no other
# writer could've taken our spot before we got
# here (because of remaining readers), as the test
# for proper conditions is at the start of the
# loop, not at the end.
self.__readers[me] = self.__upgradewritercount
self.__upgradewritercount = 0
else:
# We were a simple pending writer, just remove us
# from the FIFO list.
self.__pendingwriters.remove(me)
raise RuntimeError("Acquiring write lock timed out")
self.__condition.wait(remaining)
else:
self.__condition.wait()
finally:
self.__condition.release() |
def release(self):
"""Release the currently held lock.
In case the current thread holds no lock, a ValueError is thrown."""
me = currentThread()
self.__condition.acquire()
try:
if self.__writer is me:
# We are the writer, take one nesting depth away.
self.__writercount -= 1
if not self.__writercount:
# No more write locks; take our writer position away and
# notify waiters of the new circumstances.
self.__writer = None
self.__condition.notifyAll()
elif me in self.__readers:
# We are a reader currently, take one nesting depth away.
self.__readers[me] -= 1
if not self.__readers[me]:
# No more read locks, take our reader position away.
del self.__readers[me]
if not self.__readers:
# No more readers, notify waiters of the new
# circumstances.
self.__condition.notifyAll()
else:
raise ValueError("Trying to release unheld lock")
finally:
self.__condition.release() |
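A usage sketch for the read-write lock above (the class name ReadWriteLock is assumed from context):
rw_lock = ReadWriteLock()  # assumed class name

rw_lock.acquire_read()  # shared: many readers may hold this at once
try:
    pass  # read shared state
finally:
    rw_lock.release()

rw_lock.acquire_write(timeout=5)  # exclusive; RuntimeError if not granted in 5s
try:
    pass  # mutate shared state
finally:
    rw_lock.release()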
def _parse_gmt_time(timestring):
"""Return a standard time tuple (see time and calendar), for a date/time string."""
# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
try:
return time.strptime(timestring, "%a, %d %b %Y %H:%M:%S GMT")
except Exception:
pass
# Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
try:
return time.strptime(timestring, "%A %d-%b-%y %H:%M:%S GMT")
except Exception:
pass
# Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
try:
return time.strptime(timestring, "%a %b %d %H:%M:%S %Y")
except Exception:
pass
# Sun Nov 6 08:49:37 1994 +0100 ; ANSI C's asctime() format with
# timezone
try:
return parsedate(timestring)
except Exception:
pass
return None |
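A usage sketch; with the RFC 850 format string fixed to include the comma (matching its own comment), all three documented formats parse:
print(_parse_gmt_time("Sun, 06 Nov 1994 08:49:37 GMT"))   # RFC 1123
print(_parse_gmt_time("Sunday, 06-Nov-94 08:49:37 GMT"))  # RFC 850
print(_parse_gmt_time("Sun Nov  6 08:49:37 1994"))        # asctime()
print(_parse_gmt_time("not a date"))                      # None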
def init_logging(config):
"""Initialize base logger named 'wsgidav'.
The base logger is filtered by the `verbose` configuration option.
Log entries will have a time stamp and thread id.
:Parameters:
verbose : int
Verbosity configuration (0..5)
enable_loggers : string list
List of module logger names, that will be switched to DEBUG level.
Module loggers
~~~~~~~~~~~~~~
Module loggers (e.g 'wsgidav.lock_manager') are named loggers, that can be
independently switched to DEBUG mode.
Except for verbosity, they will inherit settings from the base logger.
They will suppress DEBUG level messages, unless they are enabled by passing
their name to util.init_logging().
If enabled, module loggers will print DEBUG messages, even if verbose == 3.
Example: initialize and use a module logger, that will generate output
if enabled (and verbose >= 3)::
_logger = util.get_module_logger(__name__)
[..]
_logger.debug("foo: '{}'".format(s))
This logger would be enabled by passing its name to init_logging()::
enable_loggers = ["lock_manager",
"property_manager",
]
util.init_logging({"verbose": 3, "enable_loggers": enable_loggers})
Log Level Matrix
~~~~~~~~~~~~~~~~
+---------+--------+---------------------------------------------------------------+
| Verbose | Option | Log level |
| level | +-------------+------------------------+------------------------+
| | | base logger | module logger(default) | module logger(enabled) |
+=========+========+=============+========================+========================+
| 0 | -qqq | CRITICAL | CRITICAL | CRITICAL |
+---------+--------+-------------+------------------------+------------------------+
| 1 | -qq | ERROR | ERROR | ERROR |
+---------+--------+-------------+------------------------+------------------------+
| 2 | -q | WARN | WARN | WARN |
+---------+--------+-------------+------------------------+------------------------+
| 3 | | INFO | INFO | **DEBUG** |
+---------+--------+-------------+------------------------+------------------------+
| 4 | -v | DEBUG | DEBUG | DEBUG |
+---------+--------+-------------+------------------------+------------------------+
| 5 | -vv | DEBUG | DEBUG | DEBUG |
+---------+--------+-------------+------------------------+------------------------+
"""
verbose = config.get("verbose", 3)
enable_loggers = config.get("enable_loggers", [])
if enable_loggers is None:
enable_loggers = []
logger_date_format = config.get("logger_date_format", "%Y-%m-%d %H:%M:%S")
logger_format = config.get(
"logger_format",
"%(asctime)s.%(msecs)03d - <%(thread)d> %(name)-27s %(levelname)-8s: %(message)s",
)
formatter = logging.Formatter(logger_format, logger_date_format)
# Define handlers
consoleHandler = logging.StreamHandler(sys.stdout)
# consoleHandler = logging.StreamHandler(sys.stderr)
consoleHandler.setFormatter(formatter)
# consoleHandler.setLevel(logging.DEBUG)
# Add the handlers to the base logger
logger = logging.getLogger(BASE_LOGGER_NAME)
if verbose >= 4: # --verbose
logger.setLevel(logging.DEBUG)
elif verbose == 3: # default
logger.setLevel(logging.INFO)
elif verbose == 2: # --quiet
logger.setLevel(logging.WARN)
# consoleHandler.setLevel(logging.WARN)
elif verbose == 1: # -qq
logger.setLevel(logging.ERROR)
# consoleHandler.setLevel(logging.WARN)
else: # -qqq
logger.setLevel(logging.CRITICAL)
# consoleHandler.setLevel(logging.ERROR)
# Don't call the root's handlers after our custom handlers
logger.propagate = False
# Remove previous handlers
for hdlr in logger.handlers[:]: # Must iterate an array copy
try:
hdlr.flush()
hdlr.close()
except Exception:
pass
logger.removeHandler(hdlr)
logger.addHandler(consoleHandler)
if verbose >= 3:
for e in enable_loggers:
if not e.startswith(BASE_LOGGER_NAME + "."):
e = BASE_LOGGER_NAME + "." + e
lg = logging.getLogger(e.strip())
lg.setLevel(logging.DEBUG) |
def get_module_logger(moduleName, defaultToVerbose=False):
"""Create a module logger, that can be en/disabled by configuration.
@see: util.init_logging
"""
# moduleName = moduleName.split(".")[-1]
if not moduleName.startswith(BASE_LOGGER_NAME + "."):
moduleName = BASE_LOGGER_NAME + "." + moduleName
logger = logging.getLogger(moduleName)
# if logger.level == logging.NOTSET and not defaultToVerbose:
# logger.setLevel(logging.INFO) # Disable debug messages by default
return logger |
def dynamic_import_class(name):
"""Import a class from a module string, e.g. ``my.module.ClassName``."""
import importlib
module_name, class_name = name.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except Exception as e:
_logger.exception("Dynamic import of {!r} failed: {}".format(name, e))
raise
the_class = getattr(module, class_name)
return the_class |
def dynamic_instantiate_middleware(name, args, expand=None):
"""Import a class and instantiate with custom args.
Example:
name = "my.module.Foo"
args_dict = {
"bar": 42,
"baz": "qux"
}
=>
from my.module import Foo
return Foo(bar=42, baz="qux")
"""
def _expand(v):
"""Replace some string templates with defined values."""
# Look up with the same (lower-cased) key used in the membership test:
if expand and compat.is_basestring(v) and v.lower() in expand:
return expand[v.lower()]
return v
try:
the_class = dynamic_import_class(name)
inst = None
if type(args) in (tuple, list):
args = tuple(map(_expand, args))
inst = the_class(*args)
else:
assert type(args) is dict
args = {k: _expand(v) for k, v in args.items()}
inst = the_class(**args)
_logger.debug("Instantiate {}({}) => {}".format(name, args, inst))
except Exception:
_logger.exception("ERROR: Instantiate {}({}) => {}".format(name, args, inst))
return inst |
def save_split(s, sep, maxsplit):
"""Split string, always returning n-tuple (filled with None if necessary)."""
tok = s.split(sep, maxsplit)
while len(tok) <= maxsplit:
tok.append(None)
return tok |
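A quick usage sketch:
print(save_split("a:b", ":", 2))    # ['a', 'b', None]
print(save_split("a:b:c", ":", 1))  # ['a', 'b:c']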
def pop_path(path):
"""Return '/a/b/c' -> ('a', '/b/c')."""
if path in ("", "/"):
return ("", "")
assert path.startswith("/")
first, _sep, rest = path.lstrip("/").partition("/")
return (first, "/" + rest) |
def pop_path2(path):
"""Return '/a/b/c' -> ('a', 'b', '/c')."""
if path in ("", "/"):
return ("", "", "")
first, rest = pop_path(path)
second, rest = pop_path(rest)
return (first, second, "/" + rest) |
def shift_path(script_name, path_info):
"""Return ('/a', '/b/c') -> ('b', '/a/b', 'c')."""
segment, rest = pop_path(path_info)
return (segment, join_uri(script_name.rstrip("/"), segment), rest.rstrip("/")) |
def split_namespace(clarkName):
"""Return (namespace, localname) tuple for a property name in Clark Notation.
Namespace defaults to ''.
Example:
'{DAV:}foo' -> ('DAV:', 'foo')
'bar' -> ('', 'bar')
"""
if clarkName.startswith("{") and "}" in clarkName:
ns, localname = clarkName.split("}", 1)
return (ns[1:], localname)
return ("", clarkName) |
def to_unicode_safe(s):
"""Convert a binary string to Unicode using UTF-8 (fallback to ISO-8859-1)."""
try:
u = compat.to_unicode(s, "utf8")
except ValueError:
_logger.error(
"to_unicode_safe({!r}) *** UTF-8 failed. Trying ISO-8859-1".format(s)
)
u = compat.to_unicode(s, "ISO-8859-1")
return u |
def safe_re_encode(s, encoding_to, errors="backslashreplace"):
"""Re-encode str or binary so that is compatible with a given encoding (replacing
unsupported chars).
We use ASCII as default, which gives us some output that contains \x99 and \u9999
for every character > 127, for easier debugging.
(e.g. if we don't know the encoding, see #87, #96)
"""
# prev = s
if not encoding_to:
encoding_to = "ASCII"
if compat.is_bytes(s):
s = s.decode(encoding_to, errors=errors).encode(encoding_to)
else:
s = s.encode(encoding_to, errors=errors).decode(encoding_to)
# print("safe_re_encode({}, {}) => {}".format(prev, encoding_to, s))
return s |
def string_repr(s):
"""Return a string as hex dump."""
if compat.is_bytes(s):
res = "{!r}: ".format(s)
for b in s:
if type(b) is str: # Py2
b = ord(b)
res += "%02x " % b
return res
return "{}".format(s) |
def byte_number_string(
number, thousandsSep=True, partition=False, base1024=True, appendBytes=True
):
"""Convert bytes into human-readable representation."""
magsuffix = ""
bytesuffix = ""
if partition:
magnitude = 0
if base1024:
while number >= 1024:
magnitude += 1
number = number >> 10
else:
while number >= 1000:
magnitude += 1
number /= 1000.0
# TODO: use "9 KB" instead of "9K Bytes"?
# TODO use 'kibi' for base 1024?
# http://en.wikipedia.org/wiki/Kibi-#IEC_standard_prefixes
magsuffix = ["", "K", "M", "G", "T", "P"][magnitude]
if appendBytes:
if number == 1:
bytesuffix = " Byte"
else:
bytesuffix = " Bytes"
if thousandsSep and (number >= 1000 or magsuffix):
# locale.setlocale(locale.LC_ALL, "")
# # TODO: make precision configurable
# snum = locale.format("%d", number, thousandsSep)
snum = "{:,d}".format(number)
else:
snum = str(number)
return "{}{}{}".format(snum, magsuffix, bytesuffix) |
def read_and_discard_input(environ):
"""Read 1 byte from wsgi.input, if this has not been done yet.
Returning a response without reading from a request body might confuse the
WebDAV client.
This may happen, if an exception like '401 Unauthorized', or
'500 Internal error' was raised BEFORE anything was read from the request
stream.
See GC issue 13, issue 23
See http://groups.google.com/group/paste-users/browse_frm/thread/fc0c9476047e9a47?hl=en
Note that with persistent sessions (HTTP/1.1) we must make sure, that the
'Connection: closed' header is set with the response, to prevent reusing
the current stream.
"""
if environ.get("wsgidav.some_input_read") or environ.get("wsgidav.all_input_read"):
return
cl = get_content_length(environ)
assert cl >= 0
if cl == 0:
return
READ_ALL = True
environ["wsgidav.some_input_read"] = 1
if READ_ALL:
environ["wsgidav.all_input_read"] = 1
wsgi_input = environ["wsgi.input"]
# TODO: check if still required after GC issue 24 is fixed
if hasattr(wsgi_input, "_consumed") and hasattr(wsgi_input, "length"):
# Seems to be Paste's httpserver.LimitedLengthFile
# see http://groups.google.com/group/paste-users/browse_thread/thread/fc0c9476047e9a47/aa4a3aa416016729?hl=en&lnk=gst&q=.input#aa4a3aa416016729 # noqa
# Consume something if nothing was consumed *and* work
# around a bug where paste.httpserver allows negative lengths
if wsgi_input._consumed == 0 and wsgi_input.length > 0:
# This seems to work even if there's 10K of input.
if READ_ALL:
n = wsgi_input.length
else:
n = 1
body = wsgi_input.read(n)
_logger.debug(
"Reading {} bytes from potentially unread httpserver.LimitedLengthFile: '{}'...".format(
n, body[:50]
)
)
elif hasattr(wsgi_input, "_sock") and hasattr(wsgi_input._sock, "settimeout"):
# Seems to be a socket
try:
# Set socket to non-blocking
sock = wsgi_input._sock
timeout = sock.gettimeout()
sock.settimeout(0)
# Read one byte
try:
if READ_ALL:
n = cl
else:
n = 1
body = wsgi_input.read(n)
_logger.debug(
"Reading {} bytes from potentially unread POST body: '{}'...".format(
n, body[:50]
)
)
except socket.error as se:
# se(10035, 'The socket operation could not complete without blocking')
_logger.error("-> read {} bytes failed: {}".format(n, se))
# Restore socket settings
sock.settimeout(timeout)
except Exception:
_logger.error("--> wsgi_input.read(): {}".format(sys.exc_info())) |
def fail(value, context_info=None, src_exception=None, err_condition=None):
"""Wrapper to raise (and log) DAVError."""
if isinstance(value, Exception):
e = as_DAVError(value)
else:
e = DAVError(value, context_info, src_exception, err_condition)
_logger.error("Raising DAVError {}".format(e.get_user_info()))
raise e |
def join_uri(uri, *segments):
"""Append segments to URI.
Example: join_uri("/a/b", "c", "d")
"""
sub = "/".join(segments)
if not sub:
return uri
return uri.rstrip("/") + "/" + sub |
def is_child_uri(parentUri, childUri):
"""Return True, if childUri is a child of parentUri.
This function accounts for the fact that '/a/b/c' and 'a/b/c/' are
children of '/a/b' (and also of '/a/b/').
Note that '/a/b/cd' is NOT a child of 'a/b/c'.
"""
return (
parentUri
and childUri
and childUri.rstrip("/").startswith(parentUri.rstrip("/") + "/")
) |
def is_equal_or_child_uri(parentUri, childUri):
"""Return True, if childUri is a child of parentUri or maps to the same resource.
Similar to <util.is_child_uri>_ , but this method also returns True, if parent
equals child. ('/a/b' is considered identical with '/a/b/').
"""
return (
parentUri
and childUri
and (childUri.rstrip("/") + "/").startswith(parentUri.rstrip("/") + "/")
) |
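A usage sketch contrasting the two predicates:
print(is_child_uri("/a/b", "/a/b/c/"))         # True
print(is_child_uri("/a/b/c", "/a/b/cd"))       # False (common prefix only)
print(is_child_uri("/a/b", "/a/b"))            # False (equal, not child)
print(is_equal_or_child_uri("/a/b", "/a/b/"))  # True (same resource)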
def make_complete_url(environ, localUri=None):
"""URL reconstruction according to PEP 333.
@see https://www.python.org/dev/peps/pep-3333/#url-reconstruction
"""
url = environ["wsgi.url_scheme"] + "://"
if environ.get("HTTP_HOST"):
url += environ["HTTP_HOST"]
else:
url += environ["SERVER_NAME"]
if environ["wsgi.url_scheme"] == "https":
if environ["SERVER_PORT"] != "443":
url += ":" + environ["SERVER_PORT"]
else:
if environ["SERVER_PORT"] != "80":
url += ":" + environ["SERVER_PORT"]
url += compat.quote(environ.get("SCRIPT_NAME", ""))
if localUri is None:
url += compat.quote(environ.get("PATH_INFO", ""))
if environ.get("QUERY_STRING"):
url += "?" + environ["QUERY_STRING"]
else:
url += localUri # TODO: quote?
return url |
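A minimal WSGI environ sketch (host and paths hypothetical) showing the reconstruction:
environ = {
    "wsgi.url_scheme": "https",
    "HTTP_HOST": "dav.example.com",
    "SERVER_NAME": "dav.example.com",
    "SERVER_PORT": "443",
    "SCRIPT_NAME": "/dav",
    "PATH_INFO": "/folder/file.txt",
    "QUERY_STRING": "",
}
print(make_complete_url(environ))  # https://dav.example.com/dav/folder/file.txt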
def parse_xml_body(environ, allow_empty=False):
"""Read request body XML into an etree.Element.
Return None, if no request body was sent.
Raise HTTP_BAD_REQUEST, if something else went wrong.
TODO: this is a very relaxed interpretation: should we raise HTTP_BAD_REQUEST
instead, if CONTENT_LENGTH is missing, invalid, or 0?
RFC: "For compatibility with HTTP/1.0 applications, HTTP/1.1 requests containing
a message-body MUST include a valid Content-Length header field unless the
server is known to be HTTP/1.1 compliant.
If a request contains a message-body and a Content-Length is not given, the
server SHOULD respond with 400 (bad request) if it cannot determine the
length of the message, or with 411 (length required) if it wishes to insist
on receiving a valid Content-Length."
So I'd say, we should accept a missing CONTENT_LENGTH, and try to read the
content anyway.
But WSGI doesn't guarantee to support input.read() without length(?).
At least it locked, when I tried it with a request that had a missing
content-type and no body.
Current approach: if CONTENT_LENGTH is
- valid and >0:
read body (exactly <CONTENT_LENGTH> bytes) and parse the result.
- 0:
Assume empty body and return None or raise exception.
- invalid (negative or not a number):
raise HTTP_BAD_REQUEST
- missing:
NOT: Try to read body until end and parse the result.
BUT: assume '0'
- empty string:
WSGI allows it to be empty or absent: treated like 'missing'.
"""
#
clHeader = environ.get("CONTENT_LENGTH", "").strip()
# content_length = -1 # read all of stream
if clHeader == "":
# No Content-Length given: assume an empty body (see docstring above)
# TODO: etree.parse() locks, if input is invalid?
# pfroot = etree.parse(environ["wsgi.input"]).getroot()
# requestbody = environ["wsgi.input"].read() # TODO: read() should be
# called in a loop?
requestbody = ""
else:
try:
content_length = int(clHeader)
if content_length < 0:
raise DAVError(HTTP_BAD_REQUEST, "Negative content-length.")
except ValueError:
raise DAVError(HTTP_BAD_REQUEST, "content-length is not numeric.")
if content_length == 0:
requestbody = ""
else:
requestbody = environ["wsgi.input"].read(content_length)
environ["wsgidav.all_input_read"] = 1
if requestbody == "":
if allow_empty:
return None
else:
raise DAVError(HTTP_BAD_REQUEST, "Body must not be empty.")
try:
rootEL = etree.fromstring(requestbody)
except Exception as e:
raise DAVError(HTTP_BAD_REQUEST, "Invalid XML format.", src_exception=e)
# If dumps of the body are desired, then this is the place to do it pretty:
if environ.get("wsgidav.dump_request_body"):
_logger.info(
"{} XML request body:\n{}".format(
environ["REQUEST_METHOD"],
compat.to_native(xml_to_bytes(rootEL, pretty_print=True)),
)
)
environ["wsgidav.dump_request_body"] = False
return rootEL |
def send_status_response(environ, start_response, e, add_headers=None, is_head=False):
"""Start a WSGI response for a DAVError or status code."""
status = get_http_status_string(e)
headers = []
if add_headers:
headers.extend(add_headers)
# if 'keep-alive' in environ.get('HTTP_CONNECTION', '').lower():
# headers += [
# ('Connection', 'keep-alive'),
# ]
if e in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT):
# See paste.lint: these status codes must not have content
start_response(
status, [("Content-Length", "0"), ("Date", get_rfc1123_time())] + headers
)
return [b""]
if e in (HTTP_OK, HTTP_CREATED):
e = DAVError(e)
assert isinstance(e, DAVError)
content_type, body = e.get_response_page()
if is_head:
body = compat.b_empty
assert compat.is_bytes(body), body # If not, Content-Length is wrong!
start_response(
status,
[
("Content-Type", content_type),
("Date", get_rfc1123_time()),
("Content-Length", str(len(body))),
]
+ headers,
)
return [body] |
def add_property_response(multistatusEL, href, propList):
"""Append <response> element to <multistatus> element.
<prop> node depends on the value type:
- str or unicode: add element with this content
- None: add an empty element
- etree.Element: add XML element as child
- DAVError: add an empty element to an own <propstatus> for this status code
@param multistatusEL: etree.Element
@param href: global URL of the resource, e.g. 'http://server:port/path'.
@param propList: list of 2-tuples (name, value)
"""
# Split propList by status code and build a unique list of namespaces
nsCount = 1
nsDict = {}
nsMap = {}
propDict = {}
for name, value in propList:
status = "200 OK"
if isinstance(value, DAVError):
status = get_http_status_string(value)
# Always generate *empty* elements for props with error status
value = None
# Collect namespaces, so we can declare them in the <response> for
# more compact output
ns, _ = split_namespace(name)
if ns != "DAV:" and ns not in nsDict and ns != "":
nsDict[ns] = True
nsMap["NS{}".format(nsCount)] = ns
nsCount += 1
propDict.setdefault(status, []).append((name, value))
# <response>
responseEL = make_sub_element(multistatusEL, "{DAV:}response", nsmap=nsMap)
# log("href value:{}".format(string_repr(href)))
# etree.SubElement(responseEL, "{DAV:}href").text = toUnicode(href)
etree.SubElement(responseEL, "{DAV:}href").text = href
# etree.SubElement(responseEL, "{DAV:}href").text = compat.quote(href, safe="/" + "!*'(),"
# + "$-_|.")
# One <propstat> per status code
for status in propDict:
propstatEL = etree.SubElement(responseEL, "{DAV:}propstat")
# List of <prop>
propEL = etree.SubElement(propstatEL, "{DAV:}prop")
for name, value in propDict[status]:
if value is None:
etree.SubElement(propEL, name)
elif is_etree_element(value):
propEL.append(value)
else:
# value must be string or unicode
# log("{} value:{}".format(name, string_repr(value)))
# etree.SubElement(propEL, name).text = value
etree.SubElement(propEL, name).text = to_unicode_safe(value)
# <status>
etree.SubElement(propstatEL, "{DAV:}status").text = "HTTP/1.1 {}".format(status) |
def calc_base64(s):
"""Return base64 encoded binarystring."""
s = compat.to_bytes(s)
s = compat.base64_encodebytes(s).strip() # return bytestring
return compat.to_native(s) |
def get_etag(file_path):
"""Return a strong Entity Tag for a (file)path.
http://www.webdav.org/specs/rfc4918.html#etag
Returns the following as entity tags::
Non-file - md5(pathname)
Win32 - md5(pathname)-lastmodifiedtime-filesize
Others - inode-lastmodifiedtime-filesize
"""
# (At least on Vista) os.path.exists returns False, if a file name contains
# special characters, even if it is correctly UTF-8 encoded.
# So we convert to unicode. On the other hand, md5() needs a byte string.
if compat.is_bytes(file_path):
unicodeFilePath = to_unicode_safe(file_path)
else:
unicodeFilePath = file_path
file_path = file_path.encode("utf8")
if not os.path.isfile(unicodeFilePath):
return md5(file_path).hexdigest()
if sys.platform == "win32":
statresults = os.stat(unicodeFilePath)
return (
md5(file_path).hexdigest()
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
)
else:
statresults = os.stat(unicodeFilePath)
return (
str(statresults[stat.ST_INO])
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
) |
def obtain_content_ranges(rangetext, filesize):
"""
Parse a Range header value; return the tuple (ranges, total_length).
ranges
list of 3-tuples (first_byte_pos, last_byte_pos, number_of_bytes_to_read),
sorted, with overlapping or adjacent ranges consolidated
total_length
total number of bytes, for the Content-Length header
"""
listReturn = []
seqRanges = rangetext.split(",")
for subrange in seqRanges:
matched = False
mObj = reByteRangeSpecifier.search(subrange)
if mObj:
firstpos = int(mObj.group(2))
if mObj.group(3) == "":
lastpos = filesize - 1
else:
lastpos = int(mObj.group(3))
if firstpos <= lastpos and firstpos < filesize:
if lastpos >= filesize:
lastpos = filesize - 1
listReturn.append((firstpos, lastpos))
matched = True
if not matched:
mObj = reSuffixByteRangeSpecifier.search(subrange)
if mObj:
firstpos = filesize - int(mObj.group(2))
if firstpos < 0:
firstpos = 0
lastpos = filesize - 1
listReturn.append((firstpos, lastpos))
matched = True
# consolidate ranges
listReturn.sort()
listReturn2 = []
totallength = 0
while len(listReturn) > 0:
(rfirstpos, rlastpos) = listReturn.pop()
counter = len(listReturn)
while counter > 0:
(nfirstpos, nlastpos) = listReturn[counter - 1]
if nlastpos < rfirstpos - 1 or nfirstpos > rlastpos + 1:
pass
else:
rfirstpos = min(rfirstpos, nfirstpos)
rlastpos = max(rlastpos, nlastpos)
del listReturn[counter - 1]
counter = counter - 1
listReturn2.append((rfirstpos, rlastpos, rlastpos - rfirstpos + 1))
totallength = totallength + rlastpos - rfirstpos + 1
return (listReturn2, totallength) |
def read_timeout_value_header(timeoutvalue):
"""Return -1 if infinite, else return numofsecs."""
timeoutsecs = 0
timeoutvaluelist = timeoutvalue.split(",")
for timeoutspec in timeoutvaluelist:
timeoutspec = timeoutspec.strip()
if timeoutspec.lower() == "infinite":
return -1
else:
listSR = reSecondsReader.findall(timeoutspec)
for secs in listSR:
timeoutsecs = int(secs)
if timeoutsecs > MAX_FINITE_TIMEOUT_LIMIT:
return -1
if timeoutsecs != 0:
return timeoutsecs
return None |
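A usage sketch for WebDAV "Timeout:" header values (assuming reSecondsReader extracts the integer from 'Second-<n>' tokens):
print(read_timeout_value_header("Infinite"))              # -1
print(read_timeout_value_header("Second-3600"))           # 3600
print(read_timeout_value_header("Infinite, Second-600"))  # -1 (first spec wins)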
def evaluate_http_conditionals(dav_res, last_modified, entitytag, environ):
"""Handle 'If-...:' headers (but not 'If:' header).
If-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
Only perform the action if the client supplied entity matches the
same entity on the server. This is mainly for methods like
PUT to only update a resource if it has not been modified since the
user last updated it.
If-Match: "737060cd8c284d8af7ad3082f209582d"
If-Modified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25
Allows a 304 Not Modified to be returned if content is unchanged
If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT
If-None-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
Allows a 304 Not Modified to be returned if content is unchanged,
see HTTP ETag
If-None-Match: "737060cd8c284d8af7ad3082f209582d"
If-Unmodified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28
Only send the response if the entity has not been modified since a
specific time.
"""
if not dav_res:
return
# Conditions
# An HTTP/1.1 origin server, upon receiving a conditional request that includes both a
# Last-Modified date (e.g., in an If-Modified-Since or If-Unmodified-Since header field) and
# one or more entity tags (e.g., in an If-Match, If-None-Match, or If-Range header field) as
# cache validators, MUST NOT return a response status of 304 (Not Modified) unless doing so
# is consistent with all of the conditional header fields in the request.
if "HTTP_IF_MATCH" in environ and dav_res.support_etag():
ifmatchlist = environ["HTTP_IF_MATCH"].split(",")
for ifmatchtag in ifmatchlist:
ifmatchtag = ifmatchtag.strip(' "\t')
if ifmatchtag == entitytag or ifmatchtag == "*":
break
raise DAVError(HTTP_PRECONDITION_FAILED, "If-Match header condition failed")
# TODO: after the refactoring
ifModifiedSinceFailed = False
if "HTTP_IF_MODIFIED_SINCE" in environ and dav_res.support_modified():
ifmodtime = parse_time_string(environ["HTTP_IF_MODIFIED_SINCE"])
if ifmodtime and ifmodtime > last_modified:
ifModifiedSinceFailed = True
# If-None-Match
# If none of the entity tags match, then the server MAY perform the requested method as if the
# If-None-Match header field did not exist, but MUST also ignore any If-Modified-Since header
# field (s) in the request. That is, if no entity tags match, then the server MUST NOT return
# a 304 (Not Modified) response.
ignoreIfModifiedSince = False
if "HTTP_IF_NONE_MATCH" in environ and dav_res.support_etag():
ifmatchlist = environ["HTTP_IF_NONE_MATCH"].split(",")
for ifmatchtag in ifmatchlist:
ifmatchtag = ifmatchtag.strip(' "\t')
if ifmatchtag == entitytag or ifmatchtag == "*":
# ETag matched. If it's a GET request and we don't have a
# conflicting If-Modified-Since header, we return NOT_MODIFIED
if (
environ["REQUEST_METHOD"] in ("GET", "HEAD")
and not ifModifiedSinceFailed
):
raise DAVError(HTTP_NOT_MODIFIED, "If-None-Match header failed")
raise DAVError(
HTTP_PRECONDITION_FAILED, "If-None-Match header condition failed"
)
ignoreIfModifiedSince = True
if "HTTP_IF_UNMODIFIED_SINCE" in environ and dav_res.support_modified():
ifunmodtime = parse_time_string(environ["HTTP_IF_UNMODIFIED_SINCE"])
if ifunmodtime and ifunmodtime <= last_modified:
raise DAVError(
HTTP_PRECONDITION_FAILED, "If-Unmodified-Since header condition failed"
)
if ifModifiedSinceFailed and not ignoreIfModifiedSince:
raise DAVError(HTTP_NOT_MODIFIED, "If-Modified-Since header condition failed")
return |
def parse_if_header_dict(environ):
"""Parse HTTP_IF header into a dictionary and lists, and cache the result.
@see http://www.webdav.org/specs/rfc4918.html#HEADER_If
"""
if "wsgidav.conditions.if" in environ:
return
if "HTTP_IF" not in environ:
environ["wsgidav.conditions.if"] = None
environ["wsgidav.ifLockTokenList"] = []
return
iftext = environ["HTTP_IF"].strip()
if not iftext.startswith("<"):
iftext = "<*>" + iftext
ifDict = dict([])
ifLockList = []
resource1 = "*"
for (tmpURLVar, URLVar, _tmpContentVar, contentVar) in reIfSeparator.findall(
iftext
):
if tmpURLVar != "":
resource1 = URLVar
else:
listTagContents = []
testflag = True
for listitem in reIfTagListContents.findall(contentVar):
if listitem.upper() != "NOT":
if listitem.startswith("["):
listTagContents.append(
(testflag, "entity", listitem.strip('"[]'))
)
else:
listTagContents.append(
(testflag, "locktoken", listitem.strip("<>"))
)
ifLockList.append(listitem.strip("<>"))
testflag = listitem.upper() != "NOT"
if resource1 in ifDict:
listTag = ifDict[resource1]
else:
listTag = []
ifDict[resource1] = listTag
listTag.append(listTagContents)
environ["wsgidav.conditions.if"] = ifDict
environ["wsgidav.ifLockTokenList"] = ifLockList
_logger.debug("parse_if_header_dict\n{}".format(pformat(ifDict)))
return |
def guess_mime_type(url):
"""Use the mimetypes module to lookup the type for an extension.
This function also adds some extensions required for HTML5
"""
(mimetype, _mimeencoding) = mimetypes.guess_type(url)
if not mimetype:
ext = os.path.splitext(url)[1]
mimetype = _MIME_TYPES.get(ext)
_logger.debug("mimetype({}): {}".format(url, mimetype))
if not mimetype:
mimetype = "application/octet-stream"
return mimetype |
def add_members(self, new_members):
"""
Add objects to the group.
Parameters
----------
new_members : list
A list of cobrapy objects to add to the group.
"""
if isinstance(new_members, string_types) or \
hasattr(new_members, "id"):
warn("need to pass in a list")
new_members = [new_members]
self._members.update(new_members) |
def remove_members(self, to_remove):
"""
Remove objects from the group.
Parameters
----------
to_remove : list
A list of cobra objects to remove from the group
"""
if isinstance(to_remove, string_types) or \
hasattr(to_remove, "id"):
warn("need to pass in a list")
to_remove = [to_remove]
self._members.difference_update(to_remove) |
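A hedged usage sketch, assuming `model` is a cobrapy model (e.g. the textbook test model) whose reactions include PGI and PFK:
from cobra.core.group import Group

group = Group("glycolysis_subset")
group.add_members([model.reactions.PGI, model.reactions.PFK])  # pass a list
group.remove_members([model.reactions.PFK])
print([member.id for member in group.members])  # ['PGI']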
def geometric_fba(model, epsilon=1E-06, max_tries=200, processes=None):
"""
Perform geometric FBA to obtain a unique, centered flux distribution.
Geometric FBA [1]_ formulates the problem as a polyhedron and
then solves it by bounding the convex hull of the polyhedron.
The bounding forms a box around the convex hull which reduces
with every iteration and extracts a unique solution in this way.
Parameters
----------
model: cobra.Model
The model to perform geometric FBA on.
epsilon: float, optional
The convergence tolerance of the model (default 1E-06).
max_tries: int, optional
Maximum number of iterations (default 200).
processes : int, optional
The number of parallel processes to run. If not explicitly passed,
will be set from the global configuration singleton.
Returns
-------
cobra.Solution
The solution object containing all the constraints required
for geometric FBA.
References
----------
.. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).
Flux balance analysis: A geometric perspective.
Journal of Theoretical Biology, 258, 311-315.
doi:10.1016/j.jtbi.2009.01.027
"""
with model:
# Variables' and constraints' storage variables.
consts = []
obj_vars = []
updating_vars_cons = []
# The first iteration.
prob = model.problem
add_pfba(model) # Minimize the solution space to a convex hull.
model.optimize()
fva_sol = flux_variability_analysis(model, processes=processes)
mean_flux = (fva_sol["maximum"] + fva_sol["minimum"]).abs() / 2
# Set the gFBA constraints.
for rxn in model.reactions:
var = prob.Variable("geometric_fba_" + rxn.id,
lb=0,
ub=mean_flux[rxn.id])
upper_const = prob.Constraint(rxn.flux_expression - var,
ub=mean_flux[rxn.id],
name="geometric_fba_upper_const_" +
rxn.id)
lower_const = prob.Constraint(rxn.flux_expression + var,
lb=fva_sol.at[rxn.id, "minimum"],
name="geometric_fba_lower_const_" +
rxn.id)
updating_vars_cons.append((rxn.id, var, upper_const, lower_const))
consts.extend([var, upper_const, lower_const])
obj_vars.append(var)
model.add_cons_vars(consts)
# Minimize the distance between the flux distribution and center.
model.objective = prob.Objective(Zero, sloppy=True, direction="min")
model.objective.set_linear_coefficients({v: 1.0 for v in obj_vars})
# Update loop variables.
sol = model.optimize()
fva_sol = flux_variability_analysis(model, processes=processes)
mean_flux = (fva_sol["maximum"] + fva_sol["minimum"]).abs() / 2
delta = (fva_sol["maximum"] - fva_sol["minimum"]).max()
count = 1
LOGGER.debug("Iteration: %d; delta: %.3g; status: %s.",
count, delta, sol.status)
# Following iterations that minimize the distance below threshold.
while delta > epsilon and count < max_tries:
for rxn_id, var, u_c, l_c in updating_vars_cons:
var.ub = mean_flux[rxn_id]
u_c.ub = mean_flux[rxn_id]
l_c.lb = fva_sol.at[rxn_id, "minimum"]
# Update loop variables.
sol = model.optimize()
fva_sol = flux_variability_analysis(model, processes=processes)
mean_flux = (fva_sol["maximum"] + fva_sol["minimum"]).abs() / 2
delta = (fva_sol["maximum"] - fva_sol["minimum"]).max()
count += 1
LOGGER.debug("Iteration: %d; delta: %.3g; status: %s.",
count, delta, sol.status)
if count == max_tries:
raise RuntimeError(
"The iterations have exceeded the maximum value of {}. "
"This is probably due to the increased complexity of the "
"model and can lead to inaccurate results. Please set a "
"different convergence tolerance and/or increase the "
"maximum iterations".format(max_tries)
)
return sol |
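# A usage sketch for geometric_fba, assuming it is exported from
# cobra.flux_analysis. The returned Solution holds a flux distribution
# centered within the optimal flux space, so repeated runs give the same
# fluxes (unlike plain FBA, which may return any optimal vertex).
import cobra.test
from cobra.flux_analysis import geometric_fba

model = cobra.test.create_test_model("textbook")
sol = geometric_fba(model, epsilon=1e-06, max_tries=200)
print(sol.status, sol.fluxes["PFK"]) |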
def _generate_index(self):
"""rebuild the _dict index"""
self._dict = {v.id: k for k, v in enumerate(self)} |
def get_by_any(self, iterable):
"""
Get a list of members using several different ways of indexing
Parameters
----------
    iterable : list (if not, it is turned into a single-element list)
        list where each element is either an int (referring to an index
        in this DictList), a string (the id of a member in this DictList)
        or a member of this DictList (passed through unchanged)
Returns
-------
list
a list of members
"""
def get_item(item):
if isinstance(item, int):
return self[item]
elif isinstance(item, string_types):
return self.get_by_id(item)
elif item in self:
return item
else:
raise TypeError("item in iterable cannot be '%s'" % type(item))
if not isinstance(iterable, list):
iterable = [iterable]
return [get_item(item) for item in iterable] |
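# A sketch of the three indexing modes accepted by get_by_any: an integer
# position, a string id, and a member object (passed through unchanged).
import cobra.test

model = cobra.test.create_test_model("textbook")
rxns = model.reactions
atpm = rxns.ATPM
members = rxns.get_by_any([0, "ATPM", atpm])
print([r.id for r in members]) |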
def query(self, search_function, attribute=None):
"""Query the list
Parameters
----------
search_function : a string, regular expression or function
Used to find the matching elements in the list.
- a regular expression (possibly compiled), in which case the
given attribute of the object should match the regular expression.
- a function which takes one argument and returns True for
desired values
attribute : string or None
        the name of the attribute of the object to pass as argument to
        the `search_function`. If this is None, the object itself is used.
Returns
-------
DictList
a new list of objects which match the query
Examples
--------
>>> import cobra.test
>>> model = cobra.test.create_test_model('textbook')
>>> model.reactions.query(lambda x: x.boundary)
>>> import re
>>> regex = re.compile('^g', flags=re.IGNORECASE)
>>> model.metabolites.query(regex, attribute='name')
"""
def select_attribute(x):
if attribute is None:
return x
else:
return getattr(x, attribute)
try:
# if the search_function is a regular expression
regex_searcher = re.compile(search_function)
if attribute is not None:
matches = (
i for i in self if
regex_searcher.findall(select_attribute(i)) != [])
else:
# Don't regex on objects
matches = (
i for i in self if
regex_searcher.findall(getattr(i, 'id')) != [])
except TypeError:
matches = (
i for i in self if search_function(select_attribute(i)))
results = self.__class__()
results._extend_nocheck(matches)
return results |
def _replace_on_id(self, new_object):
"""Replace an object by another with the same id."""
the_id = new_object.id
the_index = self._dict[the_id]
list.__setitem__(self, the_index, new_object) |
def append(self, object):
"""append object to end"""
the_id = object.id
self._check(the_id)
self._dict[the_id] = len(self)
list.append(self, object) |
def union(self, iterable):
"""adds elements with id's not already in the model"""
_dict = self._dict
append = self.append
for i in iterable:
if i.id not in _dict:
append(i) |
def extend(self, iterable):
"""extend list by appending elements from the iterable"""
# Sometimes during initialization from an older pickle, _dict
# will not have initialized yet, because the initialization class was
# left unspecified. This is an issue because unpickling calls
# DictList.extend, which requires the presence of _dict. Therefore,
# the issue is caught and addressed here.
if not hasattr(self, "_dict") or self._dict is None:
self._dict = {}
_dict = self._dict
current_length = len(self)
list.extend(self, iterable)
for i, obj in enumerate(islice(self, current_length, None),
current_length):
the_id = obj.id
if the_id not in _dict:
_dict[the_id] = i
else:
# undo the extend and raise an error
self = self[:current_length]
self._check(the_id)
# if the above succeeded, then the id must be present
# twice in the list being added
raise ValueError("id '%s' at index %d is non-unique. "
"Is it present twice?" % (str(the_id), i)) |
def _extend_nocheck(self, iterable):
"""extends without checking for uniqueness
This function should only be used internally by DictList when it
can guarantee elements are already unique (as in when coming from
self or other DictList). It will be faster because it skips these
checks.
"""
current_length = len(self)
list.extend(self, iterable)
_dict = self._dict
    if current_length == 0:
self._generate_index()
return
for i, obj in enumerate(islice(self, current_length, None),
current_length):
_dict[obj.id] = i |
def index(self, id, *args):
"""Determine the position in the list
id: A string or a :class:`~cobra.core.Object.Object`
"""
# because values are unique, start and stop are not relevant
if isinstance(id, string_types):
try:
return self._dict[id]
except KeyError:
raise ValueError("%s not found" % id)
try:
i = self._dict[id.id]
if self[i] is not id:
raise ValueError(
"Another object with the identical id (%s) found" % id.id)
return i
except KeyError:
raise ValueError("%s not found" % str(id)) |
def insert(self, index, object):
"""insert object before index"""
self._check(object.id)
list.insert(self, index, object)
# all subsequent entries now have been shifted up by 1
_dict = self._dict
for i, j in iteritems(_dict):
if j >= index:
_dict[i] = j + 1
_dict[object.id] = index |
def pop(self, *args):
"""remove and return item at index (default last)."""
value = list.pop(self, *args)
index = self._dict.pop(value.id)
    # If the pop occurred from a location other than the end of the list,
    # we will need to subtract 1 from every entry afterwards
    if len(args) == 0 or args == (-1,):  # removing from the end of the list
return value
_dict = self._dict
for i, j in iteritems(_dict):
if j > index:
_dict[i] = j - 1
return value |
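# A sketch showing that insert and pop keep the internal id -> position map
# consistent, so id lookups stay correct after structural changes. Uses a
# standalone DictList of Metabolite objects as a stand-in for model content.
from cobra import Metabolite
from cobra.core.dictlist import DictList

dl = DictList()
dl.extend([Metabolite("a"), Metabolite("b"), Metabolite("c")])
b = dl.pop(1)           # "c" shifts down to index 1
assert dl.index("c") == 1
dl.insert(0, b)         # "a" and "c" shift up by one
assert [m.id for m in dl] == ["b", "a", "c"] |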
def sort(self, cmp=None, key=None, reverse=False):
"""stable sort *IN PLACE*
cmp(x, y) -> -1, 0, 1
"""
if key is None:
def key(i):
return i.id
if PY3:
list.sort(self, key=key, reverse=reverse)
else:
list.sort(self, cmp=cmp, key=key, reverse=reverse)
self._generate_index() |
def elements(self):
""" Dictionary of elements as keys and their count in the metabolite
as integer. When set, the `formula` property is update accordingly """
tmp_formula = self.formula
if tmp_formula is None:
return {}
# necessary for some old pickles which use the deprecated
# Formula class
tmp_formula = str(self.formula)
# commonly occurring characters in incorrectly constructed formulas
if "*" in tmp_formula:
warn("invalid character '*' found in formula '%s'" % self.formula)
tmp_formula = tmp_formula.replace("*", "")
if "(" in tmp_formula or ")" in tmp_formula:
warn("invalid formula (has parenthesis) in '%s'" % self.formula)
return None
composition = {}
parsed = element_re.findall(tmp_formula)
for (element, count) in parsed:
if count == '':
count = 1
else:
try:
count = float(count)
int_count = int(count)
if count == int_count:
count = int_count
else:
warn("%s is not an integer (in formula %s)" %
(count, self.formula))
except ValueError:
warn("failed to parse %s (in formula %s)" %
(count, self.formula))
return None
if element in composition:
composition[element] += count
else:
composition[element] = count
return composition |
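# A sketch of the elements property on a Metabolite with a plain formula
# string; element counts come back as ints where the formula allows it.
from cobra import Metabolite

glc = Metabolite("glc__D_e", formula="C6H12O6")
print(glc.elements)  # {'C': 6, 'H': 12, 'O': 6} |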
def shadow_price(self):
"""
The shadow price in the most recent solution.
Shadow price is the dual value of the corresponding constraint in the
model.
Warnings
--------
* Accessing shadow prices through a `Solution` object is the safer,
preferred, and only guaranteed to be correct way. You can see how to
do so easily in the examples.
* Shadow price is retrieved from the currently defined
`self._model.solver`. The solver status is checked but there are no
guarantees that the current solver state is the one you are looking
for.
* If you modify the underlying model after an optimization, you will
retrieve the old optimization values.
Raises
------
RuntimeError
If the underlying model was never optimized beforehand or the
metabolite is not part of a model.
OptimizationError
If the solver status is anything other than 'optimal'.
Examples
--------
>>> import cobra
>>> import cobra.test
>>> model = cobra.test.create_test_model("textbook")
>>> solution = model.optimize()
>>> model.metabolites.glc__D_e.shadow_price
-0.09166474637510488
>>> solution.shadow_prices.glc__D_e
-0.091664746375104883
"""
try:
check_solver_status(self._model.solver.status)
return self._model.constraints[self.id].dual
except AttributeError:
raise RuntimeError(
"metabolite '{}' is not part of a model".format(self.id))
    # Due to the catch-all below, these need to be re-raised explicitly.
except (RuntimeError, OptimizationError) as err:
raise_with_traceback(err)
# Would love to catch CplexSolverError and GurobiError here.
except Exception as err:
raise_from(OptimizationError(
"Likely no solution exists. Original solver message: {}."
"".format(str(err))), err) |
def to_yaml(model, sort=False, **kwargs):
"""
Return the model as a YAML document.
``kwargs`` are passed on to ``yaml.dump``.
Parameters
----------
model : cobra.Model
The cobra model to represent.
sort : bool, optional
Whether to sort the metabolites, reactions, and genes or maintain the
order defined in the model.
Returns
-------
str
String representation of the cobra model as a YAML document.
See Also
--------
save_yaml_model : Write directly to a file.
ruamel.yaml.dump : Base function.
"""
obj = model_to_dict(model, sort=sort)
obj["version"] = YAML_SPEC
return yaml.dump(obj, **kwargs) |
def save_yaml_model(model, filename, sort=False, **kwargs):
"""
Write the cobra model to a file in YAML format.
``kwargs`` are passed on to ``yaml.dump``.
Parameters
----------
model : cobra.Model
The cobra model to represent.
filename : str or file-like
File path or descriptor that the YAML representation should be
written to.
sort : bool, optional
Whether to sort the metabolites, reactions, and genes or maintain the
order defined in the model.
See Also
--------
to_yaml : Return a string representation.
ruamel.yaml.dump : Base function.
"""
obj = model_to_dict(model, sort=sort)
obj["version"] = YAML_SPEC
if isinstance(filename, string_types):
with io.open(filename, "w") as file_handle:
yaml.dump(obj, file_handle, **kwargs)
else:
yaml.dump(obj, filename, **kwargs) |
def load_yaml_model(filename):
"""
Load a cobra model from a file in YAML format.
Parameters
----------
filename : str or file-like
File path or descriptor that contains the YAML document describing the
cobra model.
Returns
-------
cobra.Model
The cobra model as represented in the YAML document.
See Also
--------
from_yaml : Load from a string.
"""
if isinstance(filename, string_types):
with io.open(filename, "r") as file_handle:
return model_from_dict(yaml.load(file_handle))
else:
return model_from_dict(yaml.load(filename)) |
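# A round-trip sketch for the YAML helpers, assuming a writable working
# directory: save_yaml_model/load_yaml_model work on files, while to_yaml
# produces the same document as a string.
import cobra.test
from cobra.io import load_yaml_model, save_yaml_model

model = cobra.test.create_test_model("textbook")
save_yaml_model(model, "textbook.yml", sort=True)
copy = load_yaml_model("textbook.yml")
assert len(copy.reactions) == len(model.reactions) |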
def pfba(model, fraction_of_optimum=1.0, objective=None, reactions=None):
"""Perform basic pFBA (parsimonious Enzyme Usage Flux Balance Analysis)
to minimize total flux.
    pFBA [1] adds the minimization of all fluxes to the objective of the
    model. This approach is motivated by the idea that high fluxes imply a
    higher enzyme turnover and that, since producing enzymes is costly,
the cell will try to minimize overall flux while still maximizing the
original objective function, e.g. the growth rate.
Parameters
----------
model : cobra.Model
The model
fraction_of_optimum : float, optional
Fraction of optimum which must be maintained. The original objective
reaction is constrained to be greater than maximal_value *
fraction_of_optimum.
objective : dict or model.problem.Objective
A desired objective to use during optimization in addition to the
pFBA objective. Dictionaries (reaction as key, coefficient as value)
can be used for linear objectives.
    reactions : iterable, optional
        List of reactions or reaction identifiers. If given, only the fluxes
        for these reactions are returned. This is faster than fetching all
        fluxes if only a few are needed.
Returns
-------
cobra.Solution
The solution object to the optimized model with pFBA constraints added.
References
----------
.. [1] Lewis, N. E., Hixson, K. K., Conrad, T. M., Lerman, J. A.,
Charusanti, P., Polpitiya, A. D., Palsson, B. O. (2010). Omic data
from evolved E. coli are consistent with computed optimal growth from
genome-scale models. Molecular Systems Biology, 6,
390. doi:10.1038/msb.2010.47
"""
reactions = model.reactions if reactions is None \
else model.reactions.get_by_any(reactions)
with model as m:
add_pfba(m, objective=objective,
fraction_of_optimum=fraction_of_optimum)
m.slim_optimize(error_value=None)
solution = get_solution(m, reactions=reactions)
return solution |
def add_pfba(model, objective=None, fraction_of_optimum=1.0):
"""Add pFBA objective
Add objective to minimize the summed flux of all reactions to the
current objective.
See Also
    --------
pfba
Parameters
----------
model : cobra.Model
The model to add the objective to
objective :
An objective to set in combination with the pFBA objective.
fraction_of_optimum : float
Fraction of optimum which must be maintained. The original objective
reaction is constrained to be greater than maximal_value *
fraction_of_optimum.
"""
if objective is not None:
model.objective = objective
if model.solver.objective.name == '_pfba_objective':
raise ValueError('The model already has a pFBA objective.')
sutil.fix_objective_as_constraint(model, fraction=fraction_of_optimum)
reaction_variables = ((rxn.forward_variable, rxn.reverse_variable)
for rxn in model.reactions)
variables = chain(*reaction_variables)
model.objective = model.problem.Objective(
Zero, direction='min', sloppy=True, name="_pfba_objective")
model.objective.set_linear_coefficients({v: 1.0 for v in variables}) |
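# A usage sketch comparing plain FBA with pFBA on the textbook model: the
# objective flux stays (nearly) optimal while the total absolute flux drops.
import cobra.test
from cobra.flux_analysis import pfba

model = cobra.test.create_test_model("textbook")
fba_sol = model.optimize()
pfba_sol = pfba(model, fraction_of_optimum=1.0)
print(fba_sol.fluxes.abs().sum(), pfba_sol.fluxes.abs().sum()) |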
def metabolite_summary(met, solution=None, threshold=0.01, fva=None,
names=False, floatfmt='.3g'):
"""
Print a summary of the production and consumption fluxes.
This method requires the model for which this metabolite is a part
to be solved.
Parameters
----------
solution : cobra.Solution, optional
A previously solved model solution to use for generating the
summary. If none provided (default), the summary method will
resolve the model. Note that the solution object must match the
model, i.e., changes to the model such as changed bounds,
added or removed reactions are not taken into account by this
method.
threshold : float, optional
Threshold below which fluxes are not reported.
fva : pandas.DataFrame, float or None, optional
Whether or not to include flux variability analysis in the output.
If given, fva should either be a previous FVA solution matching
the model or a float between 0 and 1 representing the
fraction of the optimum objective to be searched.
names : bool, optional
Emit reaction and metabolite names rather than identifiers (default
False).
floatfmt : string, optional
Format string for floats (default '.3g').
"""
if names:
emit = attrgetter('name')
else:
emit = attrgetter('id')
if solution is None:
met.model.slim_optimize(error_value=None)
solution = get_solution(met.model, reactions=met.reactions)
rxns = sorted(met.reactions, key=attrgetter("id"))
rxn_id = list()
rxn_name = list()
flux = list()
reaction = list()
for rxn in rxns:
rxn_id.append(rxn.id)
rxn_name.append(format_long_string(emit(rxn), 10))
flux.append(solution[rxn.id] * rxn.metabolites[met])
txt = rxn.build_reaction_string(use_metabolite_names=names)
reaction.append(format_long_string(txt, 40 if fva is not None else 50))
flux_summary = pd.DataFrame({
"id": rxn_name,
"flux": flux,
"reaction": reaction
}, index=rxn_id)
if fva is not None:
if hasattr(fva, 'columns'):
fva_results = fva
else:
fva_results = flux_variability_analysis(
met.model, list(met.reactions), fraction_of_optimum=fva)
flux_summary["maximum"] = zeros(len(rxn_id), dtype=float)
flux_summary["minimum"] = zeros(len(rxn_id), dtype=float)
for rxn in rxns:
fmax = rxn.metabolites[met] * fva_results.at[rxn.id, "maximum"]
fmin = rxn.metabolites[met] * fva_results.at[rxn.id, "minimum"]
if abs(fmin) <= abs(fmax):
flux_summary.at[rxn.id, "fmax"] = fmax
flux_summary.at[rxn.id, "fmin"] = fmin
else:
# Reverse fluxes.
flux_summary.at[rxn.id, "fmax"] = fmin
flux_summary.at[rxn.id, "fmin"] = fmax
assert flux_summary["flux"].sum() < 1E-6, "Error in flux balance"
flux_summary = _process_flux_dataframe(flux_summary, fva, threshold,
floatfmt)
flux_summary['percent'] = 0
total_flux = flux_summary.loc[flux_summary.is_input, "flux"].sum()
flux_summary.loc[flux_summary.is_input, 'percent'] = \
flux_summary.loc[flux_summary.is_input, 'flux'] / total_flux
flux_summary.loc[~flux_summary.is_input, 'percent'] = \
flux_summary.loc[~flux_summary.is_input, 'flux'] / total_flux
flux_summary['percent'] = flux_summary.percent.apply(
lambda x: '{:.0%}'.format(x))
if fva is not None:
flux_table = tabulate(
flux_summary.loc[:, ['percent', 'flux', 'fva_fmt', 'id',
'reaction']].values, floatfmt=floatfmt,
headers=['%', 'FLUX', 'RANGE', 'RXN ID', 'REACTION']).split('\n')
else:
flux_table = tabulate(
flux_summary.loc[:, ['percent', 'flux', 'id', 'reaction']].values,
floatfmt=floatfmt, headers=['%', 'FLUX', 'RXN ID', 'REACTION']
).split('\n')
flux_table_head = flux_table[:2]
met_tag = "{0} ({1})".format(format_long_string(met.name, 45),
format_long_string(met.id, 10))
head = "PRODUCING REACTIONS -- " + met_tag
print_(head)
print_("-" * len(head))
print_('\n'.join(flux_table_head))
print_('\n'.join(
pd.np.array(flux_table[2:])[flux_summary.is_input.values]))
print_()
print_("CONSUMING REACTIONS -- " + met_tag)
print_("-" * len(head))
print_('\n'.join(flux_table_head))
print_('\n'.join(
pd.np.array(flux_table[2:])[~flux_summary.is_input.values])) |
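# A usage sketch for metabolite_summary, normally reached through the bound
# method on a metabolite; passing fva (e.g. 0.95) would add flux ranges at
# 95% of the optimum.
import cobra.test

model = cobra.test.create_test_model("textbook")
model.metabolites.atp_c.summary(threshold=0.01, names=True) |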
def model_summary(model, solution=None, threshold=0.01, fva=None, names=False,
floatfmt='.3g'):
"""
Print a summary of the input and output fluxes of the model.
Parameters
----------
    solution : cobra.Solution, optional
A previously solved model solution to use for generating the
summary. If none provided (default), the summary method will
resolve the model. Note that the solution object must match the
model, i.e., changes to the model such as changed bounds,
added or removed reactions are not taken into account by this
method.
threshold : float, optional
Threshold below which fluxes are not reported.
fva : pandas.DataFrame, float or None, optional
Whether or not to include flux variability analysis in the output.
If given, fva should either be a previous FVA solution matching
the model or a float between 0 and 1 representing the
fraction of the optimum objective to be searched.
names : bool, optional
Emit reaction and metabolite names rather than identifiers (default
False).
floatfmt : string, optional
Format string for floats (default '.3g').
"""
if names:
emit = attrgetter('name')
else:
emit = attrgetter('id')
objective_reactions = linear_reaction_coefficients(model)
boundary_reactions = model.exchanges
summary_rxns = set(objective_reactions.keys()).union(boundary_reactions)
if solution is None:
model.slim_optimize(error_value=None)
solution = get_solution(model, reactions=summary_rxns)
# Create a dataframe of objective fluxes
obj_fluxes = pd.DataFrame({key: solution[key.id] * value for key,
value in iteritems(objective_reactions)},
index=['flux']).T
obj_fluxes['id'] = obj_fluxes.apply(
lambda x: format_long_string(x.name.id, 15), 1)
# Build a dictionary of metabolite production from the boundary reactions
metabolites = {m for r in boundary_reactions for m in r.metabolites}
index = sorted(metabolites, key=attrgetter('id'))
metabolite_fluxes = pd.DataFrame({
'id': [format_long_string(emit(m), 15) for m in index],
'flux': zeros(len(index), dtype=float)
}, index=[m.id for m in index])
for rxn in boundary_reactions:
for met, stoich in iteritems(rxn.metabolites):
metabolite_fluxes.at[met.id, 'flux'] += stoich * solution[rxn.id]
# Calculate FVA results if requested
if fva is not None:
if len(index) != len(boundary_reactions):
LOGGER.warning(
"There exists more than one boundary reaction per metabolite. "
"Please be careful when evaluating flux ranges.")
metabolite_fluxes['fmin'] = zeros(len(index), dtype=float)
metabolite_fluxes['fmax'] = zeros(len(index), dtype=float)
if hasattr(fva, 'columns'):
fva_results = fva
else:
fva_results = flux_variability_analysis(
model, reaction_list=boundary_reactions,
fraction_of_optimum=fva)
for rxn in boundary_reactions:
for met, stoich in iteritems(rxn.metabolites):
fmin = stoich * fva_results.at[rxn.id, 'minimum']
fmax = stoich * fva_results.at[rxn.id, 'maximum']
# Correct 'max' and 'min' for negative values
if abs(fmin) <= abs(fmax):
metabolite_fluxes.at[met.id, 'fmin'] += fmin
metabolite_fluxes.at[met.id, 'fmax'] += fmax
else:
metabolite_fluxes.at[met.id, 'fmin'] += fmax
metabolite_fluxes.at[met.id, 'fmax'] += fmin
# Generate a dataframe of boundary fluxes
metabolite_fluxes = _process_flux_dataframe(
metabolite_fluxes, fva, threshold, floatfmt)
# Begin building string output table
def get_str_table(species_df, fva=False):
"""Formats a string table for each column"""
if fva:
return tabulate(
species_df.loc[:, ['id', 'flux', 'fva_fmt']].values,
floatfmt=floatfmt, tablefmt='simple',
headers=['id', 'Flux', 'Range']).split('\n')
else:
return tabulate(species_df.loc[:, ['id', 'flux']].values,
floatfmt=floatfmt, tablefmt='plain').split('\n')
in_table = get_str_table(
metabolite_fluxes[metabolite_fluxes['is_input']], fva=fva is not None)
out_table = get_str_table(
metabolite_fluxes[~metabolite_fluxes['is_input']], fva=fva is not None)
obj_table = get_str_table(obj_fluxes, fva=False)
# Print nested output table
print_(tabulate(
[entries for entries in zip_longest(in_table, out_table, obj_table)],
headers=['IN FLUXES', 'OUT FLUXES', 'OBJECTIVES'], tablefmt='simple')) |
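# A usage sketch for model_summary, normally reached through the bound
# method model.summary(); passing fva adds ranges to the IN/OUT flux table.
import cobra.test

model = cobra.test.create_test_model("textbook")
model.summary(threshold=0.01, fva=0.95) |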
def _process_flux_dataframe(flux_dataframe, fva, threshold, floatfmt):
"""Some common methods for processing a database of flux information into
print-ready formats. Used in both model_summary and metabolite_summary. """
abs_flux = flux_dataframe['flux'].abs()
flux_threshold = threshold * abs_flux.max()
# Drop unused boundary fluxes
if fva is None:
flux_dataframe = flux_dataframe.loc[
abs_flux >= flux_threshold, :].copy()
else:
flux_dataframe = flux_dataframe.loc[
(abs_flux >= flux_threshold) |
(flux_dataframe['fmin'].abs() >= flux_threshold) |
(flux_dataframe['fmax'].abs() >= flux_threshold), :].copy()
# Why set to zero? If included show true value?
# flux_dataframe.loc[
# flux_dataframe['flux'].abs() < flux_threshold, 'flux'] = 0
# Make all fluxes positive
if fva is None:
flux_dataframe['is_input'] = (flux_dataframe['flux'] >= 0)
flux_dataframe['flux'] = flux_dataframe['flux'].abs()
else:
def get_direction(flux, fmin, fmax):
""" decide whether or not to reverse a flux to make it positive """
if flux < 0:
return -1
elif flux > 0:
return 1
elif (fmax > 0) & (fmin <= 0):
return 1
elif (fmax < 0) & (fmin >= 0):
return -1
elif ((fmax + fmin) / 2) < 0:
return -1
else:
return 1
sign = flux_dataframe.apply(
lambda x: get_direction(x.flux, x.fmin, x.fmax), 1)
flux_dataframe['is_input'] = sign == 1
flux_dataframe.loc[:, ['flux', 'fmin', 'fmax']] = \
flux_dataframe.loc[:, ['flux', 'fmin', 'fmax']].multiply(
sign, 0).astype('float').round(6)
flux_dataframe.loc[:, ['flux', 'fmin', 'fmax']] = \
flux_dataframe.loc[:, ['flux', 'fmin', 'fmax']].applymap(
lambda x: x if abs(x) > 1E-6 else 0)
if fva is not None:
flux_dataframe['fva_fmt'] = flux_dataframe.apply(
lambda x: ("[{0.fmin:" + floatfmt + "}, {0.fmax:" +
floatfmt + "}]").format(x), 1)
flux_dataframe = flux_dataframe.sort_values(
by=['flux', 'fmax', 'fmin', 'id'],
ascending=[False, False, False, True])
else:
flux_dataframe = flux_dataframe.sort_values(
by=['flux', 'id'], ascending=[False, True])
return flux_dataframe |
def linear_reaction_coefficients(model, reactions=None):
"""Coefficient for the reactions in a linear objective.
Parameters
----------
model : cobra model
the model object that defined the objective
reactions : list
an optional list for the reactions to get the coefficients for. All
reactions if left missing.
Returns
-------
dict
A dictionary where the key is the reaction object and the value is
the corresponding coefficient. Empty dictionary if there are no
linear terms in the objective.
"""
linear_coefficients = {}
reactions = model.reactions if not reactions else reactions
try:
objective_expression = model.solver.objective.expression
coefficients = objective_expression.as_coefficients_dict()
except AttributeError:
return linear_coefficients
for rxn in reactions:
forward_coefficient = coefficients.get(rxn.forward_variable, 0)
reverse_coefficient = coefficients.get(rxn.reverse_variable, 0)
if forward_coefficient != 0:
if forward_coefficient == -reverse_coefficient:
linear_coefficients[rxn] = float(forward_coefficient)
return linear_coefficients |
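# A sketch for linear_reaction_coefficients, assuming it is importable from
# cobra.util.solver: with the textbook model's default objective it returns
# the biomass reaction mapped to a coefficient of 1.0.
import cobra.test
from cobra.util.solver import linear_reaction_coefficients

model = cobra.test.create_test_model("textbook")
coefs = linear_reaction_coefficients(model)
print({rxn.id: coef for rxn, coef in coefs.items()}) |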
def _valid_atoms(model, expression):
"""Check whether a sympy expression references the correct variables.
Parameters
----------
model : cobra.Model
The model in which to check for variables.
expression : sympy.Basic
A sympy expression.
Returns
-------
boolean
True if all referenced variables are contained in model, False
otherwise.
"""
atoms = expression.atoms(optlang.interface.Variable)
return all(a.problem is model.solver for a in atoms) |
def set_objective(model, value, additive=False):
"""Set the model objective.
Parameters
----------
model : cobra model
The model to set the objective for
value : model.problem.Objective,
e.g. optlang.glpk_interface.Objective, sympy.Basic or dict
If the model objective is linear, the value can be a new Objective
object or a dictionary with linear coefficients where each key is a
reaction and the element the new coefficient (float).
If the objective is not linear and `additive` is true, only values
of class Objective.
    additive : bool
If true, add the terms to the current objective, otherwise start with
an empty objective.
"""
interface = model.problem
reverse_value = model.solver.objective.expression
reverse_value = interface.Objective(
reverse_value, direction=model.solver.objective.direction,
sloppy=True)
if isinstance(value, dict):
if not model.objective.is_Linear:
raise ValueError('can only update non-linear objectives '
'additively using object of class '
'model.problem.Objective, not %s' %
type(value))
if not additive:
model.solver.objective = interface.Objective(
Zero, direction=model.solver.objective.direction)
for reaction, coef in value.items():
model.solver.objective.set_linear_coefficients(
{reaction.forward_variable: coef,
reaction.reverse_variable: -coef})
elif isinstance(value, (Basic, optlang.interface.Objective)):
if isinstance(value, Basic):
value = interface.Objective(
value, direction=model.solver.objective.direction,
sloppy=False)
# Check whether expression only uses variables from current model
# clone the objective if not, faster than cloning without checking
if not _valid_atoms(model, value.expression):
value = interface.Objective.clone(value, model=model.solver)
if not additive:
model.solver.objective = value
else:
model.solver.objective += value.expression
else:
raise TypeError(
'%r is not a valid objective for %r.' % (value, model.solver))
context = get_context(model)
if context:
def reset():
model.solver.objective = reverse_value
model.solver.objective.direction = reverse_value.direction
context(reset) |
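# A sketch for set_objective inside a model context, assuming it is
# importable from cobra.util.solver: the dict form sets linear coefficients,
# additive=True stacks a second term, and the context manager restores the
# original objective on exit via the registered reset() callback.
import cobra.test
from cobra.util.solver import set_objective

model = cobra.test.create_test_model("textbook")
with model:
    set_objective(model, {model.reactions.ATPM: 1.0})
    set_objective(model, {model.reactions.PFK: 0.5}, additive=True)
    print(model.objective.expression)
print(model.objective.expression)  # original biomass objective is back |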
def interface_to_str(interface):
"""Give a string representation for an optlang interface.
Parameters
----------
interface : string, ModuleType
Full name of the interface in optlang or cobra representation.
For instance 'optlang.glpk_interface' or 'optlang-glpk'.
Returns
-------
string
The name of the interface as a string
"""
if isinstance(interface, ModuleType):
interface = interface.__name__
return re.sub(r"optlang.|.interface", "", interface) |
def get_solver_name(mip=False, qp=False):
"""Select a solver for a given optimization problem.
Parameters
----------
mip : bool
Does the solver require mixed integer linear programming capabilities?
qp : bool
Does the solver require quadratic programming capabilities?
Returns
-------
string
The name of feasible solver.
Raises
------
SolverNotFound
If no suitable solver could be found.
"""
if len(solvers) == 0:
raise SolverNotFound("no solvers installed")
# Those lists need to be updated as optlang implements more solvers
mip_order = ["gurobi", "cplex", "glpk"]
lp_order = ["glpk", "cplex", "gurobi"]
qp_order = ["gurobi", "cplex"]
if mip is False and qp is False:
for solver_name in lp_order:
if solver_name in solvers:
return solver_name
        # none of the preferred solvers are installed, so return the first one
return list(solvers)[0]
elif qp: # mip does not yet matter for this determination
for solver_name in qp_order:
if solver_name in solvers:
return solver_name
raise SolverNotFound("no qp-capable solver found")
else:
for solver_name in mip_order:
if solver_name in solvers:
return solver_name
raise SolverNotFound("no mip-capable solver found") |
def choose_solver(model, solver=None, qp=False):
"""Choose a solver given a solver name and model.
This will choose a solver compatible with the model and required
capabilities. Also respects model.solver where it can.
Parameters
----------
model : a cobra model
The model for which to choose the solver.
solver : str, optional
The name of the solver to be used.
qp : boolean, optional
Whether the solver needs Quadratic Programming capabilities.
Returns
-------
solver : an optlang solver interface
Returns a valid solver for the problem.
Raises
------
SolverNotFound
If no suitable solver could be found.
"""
if solver is None:
solver = model.problem
else:
model.solver = solver
# Check for QP, raise error if no QP solver found
if qp and interface_to_str(solver) not in qp_solvers:
solver = solvers[get_solver_name(qp=True)]
return solver |
def add_cons_vars_to_problem(model, what, **kwargs):
"""Add variables and constraints to a Model's solver object.
Useful for variables and constraints that can not be expressed with
reactions and lower/upper bounds. Will integrate with the Model's context
manager in order to revert changes upon leaving the context.
Parameters
----------
model : a cobra model
The model to which to add the variables and constraints.
what : list or tuple of optlang variables or constraints.
The variables or constraints to add to the model. Must be of class
`model.problem.Variable` or
`model.problem.Constraint`.
**kwargs : keyword arguments
passed to solver.add()
"""
context = get_context(model)
model.solver.add(what, **kwargs)
if context:
context(partial(model.solver.remove, what)) |
def remove_cons_vars_from_problem(model, what):
"""Remove variables and constraints from a Model's solver object.
    Useful to temporarily remove variables and constraints from a Model's
solver object.
Parameters
----------
model : a cobra model
The model from which to remove the variables and constraints.
what : list or tuple of optlang variables or constraints.
The variables or constraints to remove from the model. Must be of
class `model.problem.Variable` or
`model.problem.Constraint`.
"""
context = get_context(model)
model.solver.remove(what)
if context:
context(partial(model.solver.add, what)) |
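# A sketch for add_cons_vars_to_problem inside a model context, assuming it
# is importable from cobra.util.solver: a custom constraint forcing two
# reactions to carry equal flux is added, then removed automatically when
# the context exits (via the registered callback).
import cobra.test
from cobra.util.solver import add_cons_vars_to_problem

model = cobra.test.create_test_model("textbook")
with model:
    same_flux = model.problem.Constraint(
        model.reactions.PGI.flux_expression -
        model.reactions.PFK.flux_expression, lb=0, ub=0)
    add_cons_vars_to_problem(model, [same_flux])
    sol = model.optimize()
    print(sol.fluxes["PGI"], sol.fluxes["PFK"])  # equal by construction |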