id (int32, 0-252k) | repo (stringlengths 7-55) | path (stringlengths 4-127) | func_name (stringlengths 1-88) | original_string (stringlengths 75-19.8k) | language (stringclasses, 1 value) | code (stringlengths 75-19.8k) | code_tokens (list) | docstring (stringlengths 3-17.3k) | docstring_tokens (list) | sha (stringlengths 40-40) | url (stringlengths 87-242) |
|---|---|---|---|---|---|---|---|---|---|---|---|
18,300
|
tonycpsu/panwid
|
panwid/scroll.py
|
Scrollable._adjust_trim_top
|
def _adjust_trim_top(self, canv, size):
    """Adjust self._trim_top according to self._scroll_action"""
    action = self._scroll_action
    self._scroll_action = None
    maxcol, maxrow = size
    trim_top = self._trim_top
    canv_rows = canv.rows()
    if trim_top < 0:
        # Negative trim_top values use bottom of canvas as reference
        trim_top = canv_rows - maxrow + trim_top + 1
    if canv_rows <= maxrow:
        self._trim_top = 0  # Reset scroll position
        return
    def ensure_bounds(new_trim_top):
        return max(0, min(canv_rows - maxrow, new_trim_top))
    if action == SCROLL_LINE_UP:
        self._trim_top = ensure_bounds(trim_top - 1)
    elif action == SCROLL_LINE_DOWN:
        self._trim_top = ensure_bounds(trim_top + 1)
    elif action == SCROLL_PAGE_UP:
        self._trim_top = ensure_bounds(trim_top - maxrow+1)
    elif action == SCROLL_PAGE_DOWN:
        self._trim_top = ensure_bounds(trim_top + maxrow-1)
    elif action == SCROLL_TO_TOP:
        self._trim_top = 0
    elif action == SCROLL_TO_END:
        self._trim_top = canv_rows - maxrow
    else:
        self._trim_top = ensure_bounds(trim_top)
    # If the cursor was moved by the most recent keypress, adjust trim_top
    # so that the new cursor position is within the displayed canvas part.
    # But don't do this if the cursor is at the top/bottom edge so we can still scroll out
    if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
        self._old_cursor_coords = None
        curscol, cursrow = canv.cursor
        if cursrow < self._trim_top:
            self._trim_top = cursrow
        elif cursrow >= self._trim_top + maxrow:
            self._trim_top = max(0, cursrow - maxrow + 1)
|
python
|
def _adjust_trim_top(self, canv, size):
    """Adjust self._trim_top according to self._scroll_action"""
    action = self._scroll_action
    self._scroll_action = None
    maxcol, maxrow = size
    trim_top = self._trim_top
    canv_rows = canv.rows()
    if trim_top < 0:
        # Negative trim_top values use bottom of canvas as reference
        trim_top = canv_rows - maxrow + trim_top + 1
    if canv_rows <= maxrow:
        self._trim_top = 0  # Reset scroll position
        return
    def ensure_bounds(new_trim_top):
        return max(0, min(canv_rows - maxrow, new_trim_top))
    if action == SCROLL_LINE_UP:
        self._trim_top = ensure_bounds(trim_top - 1)
    elif action == SCROLL_LINE_DOWN:
        self._trim_top = ensure_bounds(trim_top + 1)
    elif action == SCROLL_PAGE_UP:
        self._trim_top = ensure_bounds(trim_top - maxrow+1)
    elif action == SCROLL_PAGE_DOWN:
        self._trim_top = ensure_bounds(trim_top + maxrow-1)
    elif action == SCROLL_TO_TOP:
        self._trim_top = 0
    elif action == SCROLL_TO_END:
        self._trim_top = canv_rows - maxrow
    else:
        self._trim_top = ensure_bounds(trim_top)
    # If the cursor was moved by the most recent keypress, adjust trim_top
    # so that the new cursor position is within the displayed canvas part.
    # But don't do this if the cursor is at the top/bottom edge so we can still scroll out
    if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
        self._old_cursor_coords = None
        curscol, cursrow = canv.cursor
        if cursrow < self._trim_top:
            self._trim_top = cursrow
        elif cursrow >= self._trim_top + maxrow:
            self._trim_top = max(0, cursrow - maxrow + 1)
|
[
"def",
"_adjust_trim_top",
"(",
"self",
",",
"canv",
",",
"size",
")",
":",
"action",
"=",
"self",
".",
"_scroll_action",
"self",
".",
"_scroll_action",
"=",
"None",
"maxcol",
",",
"maxrow",
"=",
"size",
"trim_top",
"=",
"self",
".",
"_trim_top",
"canv_rows",
"=",
"canv",
".",
"rows",
"(",
")",
"if",
"trim_top",
"<",
"0",
":",
"# Negative trim_top values use bottom of canvas as reference",
"trim_top",
"=",
"canv_rows",
"-",
"maxrow",
"+",
"trim_top",
"+",
"1",
"if",
"canv_rows",
"<=",
"maxrow",
":",
"self",
".",
"_trim_top",
"=",
"0",
"# Reset scroll position",
"return",
"def",
"ensure_bounds",
"(",
"new_trim_top",
")",
":",
"return",
"max",
"(",
"0",
",",
"min",
"(",
"canv_rows",
"-",
"maxrow",
",",
"new_trim_top",
")",
")",
"if",
"action",
"==",
"SCROLL_LINE_UP",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"-",
"1",
")",
"elif",
"action",
"==",
"SCROLL_LINE_DOWN",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"+",
"1",
")",
"elif",
"action",
"==",
"SCROLL_PAGE_UP",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"-",
"maxrow",
"+",
"1",
")",
"elif",
"action",
"==",
"SCROLL_PAGE_DOWN",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
"+",
"maxrow",
"-",
"1",
")",
"elif",
"action",
"==",
"SCROLL_TO_TOP",
":",
"self",
".",
"_trim_top",
"=",
"0",
"elif",
"action",
"==",
"SCROLL_TO_END",
":",
"self",
".",
"_trim_top",
"=",
"canv_rows",
"-",
"maxrow",
"else",
":",
"self",
".",
"_trim_top",
"=",
"ensure_bounds",
"(",
"trim_top",
")",
"# If the cursor was moved by the most recent keypress, adjust trim_top",
"# so that the new cursor position is within the displayed canvas part.",
"# But don't do this if the cursor is at the top/bottom edge so we can still scroll out",
"if",
"self",
".",
"_old_cursor_coords",
"is",
"not",
"None",
"and",
"self",
".",
"_old_cursor_coords",
"!=",
"canv",
".",
"cursor",
":",
"self",
".",
"_old_cursor_coords",
"=",
"None",
"curscol",
",",
"cursrow",
"=",
"canv",
".",
"cursor",
"if",
"cursrow",
"<",
"self",
".",
"_trim_top",
":",
"self",
".",
"_trim_top",
"=",
"cursrow",
"elif",
"cursrow",
">=",
"self",
".",
"_trim_top",
"+",
"maxrow",
":",
"self",
".",
"_trim_top",
"=",
"max",
"(",
"0",
",",
"cursrow",
"-",
"maxrow",
"+",
"1",
")"
] |
Adjust self._trim_top according to self._scroll_action
|
[
"Adjust",
"self",
".",
"_trim_top",
"according",
"to",
"self",
".",
"_scroll_action"
] |
e83a1f612cf5c53de88a7180c1b84b3b7b85460a
|
https://github.com/tonycpsu/panwid/blob/e83a1f612cf5c53de88a7180c1b84b3b7b85460a/panwid/scroll.py#L177-L224
|
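Editor's note: the heart of the row above is the `ensure_bounds` clamp. A minimal standalone sketch of that rule (names and numbers are ours, not part of the dataset):

```python
def ensure_bounds(new_trim_top, canv_rows, maxrow):
    # Valid scroll offsets leave at least one full screen (maxrow rows)
    # of the canvas visible, so they live in [0, canv_rows - maxrow].
    return max(0, min(canv_rows - maxrow, new_trim_top))

# A 100-row canvas in a 20-row screen allows offsets 0..80:
assert ensure_bounds(-5, 100, 20) == 0
assert ensure_bounds(95, 100, 20) == 80
```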
18,301
|
tonycpsu/panwid
|
panwid/scroll.py
|
Scrollable.rows_max
|
def rows_max(self, size=None, focus=False):
    """Return the number of rows for `size`
    If `size` is not given, the currently rendered number of rows is returned.
    """
    if size is not None:
        ow = self._original_widget
        ow_size = self._get_original_widget_size(size)
        sizing = ow.sizing()
        if FIXED in sizing:
            self._rows_max_cached = ow.pack(ow_size, focus)[1]
        elif FLOW in sizing:
            self._rows_max_cached = ow.rows(ow_size, focus)
        else:
            raise RuntimeError('Not a flow/box widget: %r' % self._original_widget)
    return self._rows_max_cached
|
python
|
def rows_max(self, size=None, focus=False):
    """Return the number of rows for `size`
    If `size` is not given, the currently rendered number of rows is returned.
    """
    if size is not None:
        ow = self._original_widget
        ow_size = self._get_original_widget_size(size)
        sizing = ow.sizing()
        if FIXED in sizing:
            self._rows_max_cached = ow.pack(ow_size, focus)[1]
        elif FLOW in sizing:
            self._rows_max_cached = ow.rows(ow_size, focus)
        else:
            raise RuntimeError('Not a flow/box widget: %r' % self._original_widget)
    return self._rows_max_cached
|
[
"def",
"rows_max",
"(",
"self",
",",
"size",
"=",
"None",
",",
"focus",
"=",
"False",
")",
":",
"if",
"size",
"is",
"not",
"None",
":",
"ow",
"=",
"self",
".",
"_original_widget",
"ow_size",
"=",
"self",
".",
"_get_original_widget_size",
"(",
"size",
")",
"sizing",
"=",
"ow",
".",
"sizing",
"(",
")",
"if",
"FIXED",
"in",
"sizing",
":",
"self",
".",
"_rows_max_cached",
"=",
"ow",
".",
"pack",
"(",
"ow_size",
",",
"focus",
")",
"[",
"1",
"]",
"elif",
"FLOW",
"in",
"sizing",
":",
"self",
".",
"_rows_max_cached",
"=",
"ow",
".",
"rows",
"(",
"ow_size",
",",
"focus",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Not a flow/box widget: %r'",
"%",
"self",
".",
"_original_widget",
")",
"return",
"self",
".",
"_rows_max_cached"
] |
Return the number of rows for `size`
If `size` is not given, the currently rendered number of rows is returned.
|
[
"Return",
"the",
"number",
"of",
"rows",
"for",
"size"
] |
e83a1f612cf5c53de88a7180c1b84b3b7b85460a
|
https://github.com/tonycpsu/panwid/blob/e83a1f612cf5c53de88a7180c1b84b3b7b85460a/panwid/scroll.py#L257-L272
|
18,302
|
tonycpsu/panwid
|
panwid/scroll.py
|
ScrollBar.scrolling_base_widget
|
def scrolling_base_widget(self):
    """Nearest `original_widget` that is compatible with the scrolling API"""
    def orig_iter(w):
        while hasattr(w, 'original_widget'):
            w = w.original_widget
            yield w
        yield w
    def is_scrolling_widget(w):
        return hasattr(w, 'get_scrollpos') and hasattr(w, 'rows_max')
    for w in orig_iter(self):
        if is_scrolling_widget(w):
            return w
    raise ValueError('Not compatible to be wrapped by ScrollBar: %r' % w)
|
python
|
def scrolling_base_widget(self):
    """Nearest `original_widget` that is compatible with the scrolling API"""
    def orig_iter(w):
        while hasattr(w, 'original_widget'):
            w = w.original_widget
            yield w
        yield w
    def is_scrolling_widget(w):
        return hasattr(w, 'get_scrollpos') and hasattr(w, 'rows_max')
    for w in orig_iter(self):
        if is_scrolling_widget(w):
            return w
    raise ValueError('Not compatible to be wrapped by ScrollBar: %r' % w)
|
[
"def",
"scrolling_base_widget",
"(",
"self",
")",
":",
"def",
"orig_iter",
"(",
"w",
")",
":",
"while",
"hasattr",
"(",
"w",
",",
"'original_widget'",
")",
":",
"w",
"=",
"w",
".",
"original_widget",
"yield",
"w",
"yield",
"w",
"def",
"is_scrolling_widget",
"(",
"w",
")",
":",
"return",
"hasattr",
"(",
"w",
",",
"'get_scrollpos'",
")",
"and",
"hasattr",
"(",
"w",
",",
"'rows_max'",
")",
"for",
"w",
"in",
"orig_iter",
"(",
"self",
")",
":",
"if",
"is_scrolling_widget",
"(",
"w",
")",
":",
"return",
"w",
"raise",
"ValueError",
"(",
"'Not compatible to be wrapped by ScrollBar: %r'",
"%",
"w",
")"
] |
Nearest `original_widget` that is compatible with the scrolling API
|
[
"Nearest",
"original_widget",
"that",
"is",
"compatible",
"with",
"the",
"scrolling",
"API"
] |
e83a1f612cf5c53de88a7180c1b84b3b7b85460a
|
https://github.com/tonycpsu/panwid/blob/e83a1f612cf5c53de88a7180c1b84b3b7b85460a/panwid/scroll.py#L389-L403
|
18,303
|
kyuupichan/aiorpcX
|
aiorpcx/curio.py
|
ignore_after
|
def ignore_after(seconds, coro=None, *args, timeout_result=None):
    '''Execute the specified coroutine and return its result. Issue a
    cancellation request after seconds have elapsed. When a timeout
    occurs, no exception is raised. Instead, timeout_result is
    returned.
    If coro is None, the result is an asynchronous context manager
    that applies a timeout to a block of statements. For the context
    manager case, the resulting context manager object has an expired
    attribute set to True if time expired.
    Note: ignore_after() may also be composed with other timeout
    operations. TimeoutCancellationError and UncaughtTimeoutError
    exceptions might be raised according to the same rules as for
    timeout_after().
    '''
    if coro:
        return _ignore_after_func(seconds, False, coro, args, timeout_result)
    return TimeoutAfter(seconds, ignore=True)
|
python
|
def ignore_after(seconds, coro=None, *args, timeout_result=None):
    '''Execute the specified coroutine and return its result. Issue a
    cancellation request after seconds have elapsed. When a timeout
    occurs, no exception is raised. Instead, timeout_result is
    returned.
    If coro is None, the result is an asynchronous context manager
    that applies a timeout to a block of statements. For the context
    manager case, the resulting context manager object has an expired
    attribute set to True if time expired.
    Note: ignore_after() may also be composed with other timeout
    operations. TimeoutCancellationError and UncaughtTimeoutError
    exceptions might be raised according to the same rules as for
    timeout_after().
    '''
    if coro:
        return _ignore_after_func(seconds, False, coro, args, timeout_result)
    return TimeoutAfter(seconds, ignore=True)
|
[
"def",
"ignore_after",
"(",
"seconds",
",",
"coro",
"=",
"None",
",",
"*",
"args",
",",
"timeout_result",
"=",
"None",
")",
":",
"if",
"coro",
":",
"return",
"_ignore_after_func",
"(",
"seconds",
",",
"False",
",",
"coro",
",",
"args",
",",
"timeout_result",
")",
"return",
"TimeoutAfter",
"(",
"seconds",
",",
"ignore",
"=",
"True",
")"
] |
Execute the specified coroutine and return its result. Issue a
cancellation request after seconds have elapsed. When a timeout
occurs, no exception is raised. Instead, timeout_result is
returned.
If coro is None, the result is an asynchronous context manager
that applies a timeout to a block of statements. For the context
manager case, the resulting context manager object has an expired
attribute set to True if time expired.
Note: ignore_after() may also be composed with other timeout
operations. TimeoutCancellationError and UncaughtTimeoutError
exceptions might be raised according to the same rules as for
timeout_after().
|
[
"Execute",
"the",
"specified",
"coroutine",
"and",
"return",
"its",
"result",
".",
"Issue",
"a",
"cancellation",
"request",
"after",
"seconds",
"have",
"elapsed",
".",
"When",
"a",
"timeout",
"occurs",
"no",
"exception",
"is",
"raised",
".",
"Instead",
"timeout_result",
"is",
"returned",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L392-L411
|
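Editor's note: a hedged usage sketch of the two call forms described in the docstring above (`slow` is a hypothetical coroutine; the aiorpcx public API is assumed to match this source):

```python
import asyncio
from aiorpcx import ignore_after

async def slow():
    await asyncio.sleep(10)

async def main():
    # Coroutine form: cancelled after 0.1s, timeout_result is returned.
    result = await ignore_after(0.1, slow(), timeout_result='timed out')
    # Context-manager form: the block is cancelled, `expired` records it.
    async with ignore_after(0.1) as timer:
        await slow()
    print(result, timer.expired)  # 'timed out' True

asyncio.run(main())
```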
18,304
|
kyuupichan/aiorpcX
|
aiorpcx/curio.py
|
TaskGroup._add_task
|
def _add_task(self, task):
    '''Add an already existing task to the task group.'''
    if hasattr(task, '_task_group'):
        raise RuntimeError('task is already part of a group')
    if self._closed:
        raise RuntimeError('task group is closed')
    task._task_group = self
    if task.done():
        self._done.append(task)
    else:
        self._pending.add(task)
        task.add_done_callback(self._on_done)
|
python
|
def _add_task(self, task):
    '''Add an already existing task to the task group.'''
    if hasattr(task, '_task_group'):
        raise RuntimeError('task is already part of a group')
    if self._closed:
        raise RuntimeError('task group is closed')
    task._task_group = self
    if task.done():
        self._done.append(task)
    else:
        self._pending.add(task)
        task.add_done_callback(self._on_done)
|
[
"def",
"_add_task",
"(",
"self",
",",
"task",
")",
":",
"if",
"hasattr",
"(",
"task",
",",
"'_task_group'",
")",
":",
"raise",
"RuntimeError",
"(",
"'task is already part of a group'",
")",
"if",
"self",
".",
"_closed",
":",
"raise",
"RuntimeError",
"(",
"'task group is closed'",
")",
"task",
".",
"_task_group",
"=",
"self",
"if",
"task",
".",
"done",
"(",
")",
":",
"self",
".",
"_done",
".",
"append",
"(",
"task",
")",
"else",
":",
"self",
".",
"_pending",
".",
"add",
"(",
"task",
")",
"task",
".",
"add_done_callback",
"(",
"self",
".",
"_on_done",
")"
] |
Add an already existing task to the task group.
|
[
"Add",
"an",
"already",
"existing",
"task",
"to",
"the",
"task",
"group",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L118-L129
|
18,305
|
kyuupichan/aiorpcX
|
aiorpcx/curio.py
|
TaskGroup.next_done
|
async def next_done(self):
    '''Returns the next completed task. Returns None if no more tasks
    remain. A TaskGroup may also be used as an asynchronous iterator.
    '''
    if not self._done and self._pending:
        self._done_event.clear()
        await self._done_event.wait()
    if self._done:
        return self._done.popleft()
    return None
|
python
|
async def next_done(self):
    '''Returns the next completed task. Returns None if no more tasks
    remain. A TaskGroup may also be used as an asynchronous iterator.
    '''
    if not self._done and self._pending:
        self._done_event.clear()
        await self._done_event.wait()
    if self._done:
        return self._done.popleft()
    return None
|
[
"async",
"def",
"next_done",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_done",
"and",
"self",
".",
"_pending",
":",
"self",
".",
"_done_event",
".",
"clear",
"(",
")",
"await",
"self",
".",
"_done_event",
".",
"wait",
"(",
")",
"if",
"self",
".",
"_done",
":",
"return",
"self",
".",
"_done",
".",
"popleft",
"(",
")",
"return",
"None"
] |
Returns the next completed task. Returns None if no more tasks
remain. A TaskGroup may also be used as an asynchronous iterator.
|
[
"Returns",
"the",
"next",
"completed",
"task",
".",
"Returns",
"None",
"if",
"no",
"more",
"tasks",
"remain",
".",
"A",
"TaskGroup",
"may",
"also",
"be",
"used",
"as",
"an",
"asynchronous",
"iterator",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L155-L164
|
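Editor's note: a minimal sketch of draining a group in completion order, per the docstring above (TaskGroup's spawn/iteration API assumed from the aiorpcx source):

```python
import asyncio
from aiorpcx import TaskGroup

async def work(n):
    await asyncio.sleep(n / 10)
    return n

async def main():
    group = TaskGroup()
    for n in (3, 1, 2):
        await group.spawn(work(n))
    async for task in group:      # iteration is driven by next_done()
        print(task.result())      # prints 1, 2, 3 in completion order

asyncio.run(main())
```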
18,306
|
kyuupichan/aiorpcX
|
aiorpcx/curio.py
|
TaskGroup.join
|
async def join(self):
    '''Wait for tasks in the group to terminate according to the wait
    policy for the group.
    If the join() operation itself is cancelled, all remaining
    tasks in the group are also cancelled.
    If a TaskGroup is used as a context manager, the join() method
    is called on context-exit.
    Once join() returns, no more tasks may be added to the task
    group. Tasks can be added while join() is running.
    '''
    def errored(task):
        return not task.cancelled() and task.exception()
    try:
        if self._wait in (all, object):
            while True:
                task = await self.next_done()
                if task is None:
                    return
                if errored(task):
                    break
                if self._wait is object:
                    if task.cancelled() or task.result() is not None:
                        return
        else:  # any
            task = await self.next_done()
            if task is None or not errored(task):
                return
    finally:
        await self.cancel_remaining()
    if errored(task):
        raise task.exception()
|
python
|
async def join(self):
    '''Wait for tasks in the group to terminate according to the wait
    policy for the group.
    If the join() operation itself is cancelled, all remaining
    tasks in the group are also cancelled.
    If a TaskGroup is used as a context manager, the join() method
    is called on context-exit.
    Once join() returns, no more tasks may be added to the task
    group. Tasks can be added while join() is running.
    '''
    def errored(task):
        return not task.cancelled() and task.exception()
    try:
        if self._wait in (all, object):
            while True:
                task = await self.next_done()
                if task is None:
                    return
                if errored(task):
                    break
                if self._wait is object:
                    if task.cancelled() or task.result() is not None:
                        return
        else:  # any
            task = await self.next_done()
            if task is None or not errored(task):
                return
    finally:
        await self.cancel_remaining()
    if errored(task):
        raise task.exception()
|
[
"async",
"def",
"join",
"(",
"self",
")",
":",
"def",
"errored",
"(",
"task",
")",
":",
"return",
"not",
"task",
".",
"cancelled",
"(",
")",
"and",
"task",
".",
"exception",
"(",
")",
"try",
":",
"if",
"self",
".",
"_wait",
"in",
"(",
"all",
",",
"object",
")",
":",
"while",
"True",
":",
"task",
"=",
"await",
"self",
".",
"next_done",
"(",
")",
"if",
"task",
"is",
"None",
":",
"return",
"if",
"errored",
"(",
"task",
")",
":",
"break",
"if",
"self",
".",
"_wait",
"is",
"object",
":",
"if",
"task",
".",
"cancelled",
"(",
")",
"or",
"task",
".",
"result",
"(",
")",
"is",
"not",
"None",
":",
"return",
"else",
":",
"# any",
"task",
"=",
"await",
"self",
".",
"next_done",
"(",
")",
"if",
"task",
"is",
"None",
"or",
"not",
"errored",
"(",
"task",
")",
":",
"return",
"finally",
":",
"await",
"self",
".",
"cancel_remaining",
"(",
")",
"if",
"errored",
"(",
"task",
")",
":",
"raise",
"task",
".",
"exception",
"(",
")"
] |
Wait for tasks in the group to terminate according to the wait
policy for the group.
If the join() operation itself is cancelled, all remaining
tasks in the group are also cancelled.
If a TaskGroup is used as a context manager, the join() method
is called on context-exit.
Once join() returns, no more tasks may be added to the task
group. Tasks can be added while join() is running.
|
[
"Wait",
"for",
"tasks",
"in",
"the",
"group",
"to",
"terminate",
"according",
"to",
"the",
"wait",
"policy",
"for",
"the",
"group",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L176-L211
|
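Editor's note: the context-manager form mentioned in the docstring, as a hedged sketch (join() runs on context-exit; wait=all matches the default policy implied by the source above):

```python
import asyncio
from aiorpcx import TaskGroup

async def main():
    async with TaskGroup(wait=all) as group:
        await group.spawn(asyncio.sleep(0.1))
        await group.spawn(asyncio.sleep(0.2))
    # join() has completed here; a task exception would have re-raised.

asyncio.run(main())
```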
18,307
|
kyuupichan/aiorpcX
|
aiorpcx/curio.py
|
TaskGroup.cancel_remaining
|
async def cancel_remaining(self):
    '''Cancel all remaining tasks.'''
    self._closed = True
    task_list = list(self._pending)
    for task in task_list:
        task.cancel()
    for task in task_list:
        with suppress(CancelledError):
            await task
|
python
|
async def cancel_remaining(self):
    '''Cancel all remaining tasks.'''
    self._closed = True
    task_list = list(self._pending)
    for task in task_list:
        task.cancel()
    for task in task_list:
        with suppress(CancelledError):
            await task
|
[
"async",
"def",
"cancel_remaining",
"(",
"self",
")",
":",
"self",
".",
"_closed",
"=",
"True",
"task_list",
"=",
"list",
"(",
"self",
".",
"_pending",
")",
"for",
"task",
"in",
"task_list",
":",
"task",
".",
"cancel",
"(",
")",
"for",
"task",
"in",
"task_list",
":",
"with",
"suppress",
"(",
"CancelledError",
")",
":",
"await",
"task"
] |
Cancel all remaining tasks.
|
[
"Cancel",
"all",
"remaining",
"tasks",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L213-L221
|
18,308
|
kyuupichan/aiorpcX
|
aiorpcx/socks.py
|
SOCKSProxy._connect_one
|
async def _connect_one(self, remote_address):
    '''Connect to the proxy and perform a handshake requesting a connection.
    Return the open socket on success, or the exception on failure.
    '''
    loop = asyncio.get_event_loop()
    for info in await loop.getaddrinfo(str(self.address.host), self.address.port,
                                       type=socket.SOCK_STREAM):
        # This object has state so is only good for one connection
        client = self.protocol(remote_address, self.auth)
        sock = socket.socket(family=info[0])
        try:
            # A non-blocking socket is required by loop socket methods
            sock.setblocking(False)
            await loop.sock_connect(sock, info[4])
            await self._handshake(client, sock, loop)
            self.peername = sock.getpeername()
            return sock
        except (OSError, SOCKSProtocolError) as e:
            exception = e
            # Don't close the socket because of an asyncio bug
            # see https://github.com/kyuupichan/aiorpcX/issues/8
    return exception
|
python
|
async def _connect_one(self, remote_address):
    '''Connect to the proxy and perform a handshake requesting a connection.
    Return the open socket on success, or the exception on failure.
    '''
    loop = asyncio.get_event_loop()
    for info in await loop.getaddrinfo(str(self.address.host), self.address.port,
                                       type=socket.SOCK_STREAM):
        # This object has state so is only good for one connection
        client = self.protocol(remote_address, self.auth)
        sock = socket.socket(family=info[0])
        try:
            # A non-blocking socket is required by loop socket methods
            sock.setblocking(False)
            await loop.sock_connect(sock, info[4])
            await self._handshake(client, sock, loop)
            self.peername = sock.getpeername()
            return sock
        except (OSError, SOCKSProtocolError) as e:
            exception = e
            # Don't close the socket because of an asyncio bug
            # see https://github.com/kyuupichan/aiorpcX/issues/8
    return exception
|
[
"async",
"def",
"_connect_one",
"(",
"self",
",",
"remote_address",
")",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"for",
"info",
"in",
"await",
"loop",
".",
"getaddrinfo",
"(",
"str",
"(",
"self",
".",
"address",
".",
"host",
")",
",",
"self",
".",
"address",
".",
"port",
",",
"type",
"=",
"socket",
".",
"SOCK_STREAM",
")",
":",
"# This object has state so is only good for one connection",
"client",
"=",
"self",
".",
"protocol",
"(",
"remote_address",
",",
"self",
".",
"auth",
")",
"sock",
"=",
"socket",
".",
"socket",
"(",
"family",
"=",
"info",
"[",
"0",
"]",
")",
"try",
":",
"# A non-blocking socket is required by loop socket methods",
"sock",
".",
"setblocking",
"(",
"False",
")",
"await",
"loop",
".",
"sock_connect",
"(",
"sock",
",",
"info",
"[",
"4",
"]",
")",
"await",
"self",
".",
"_handshake",
"(",
"client",
",",
"sock",
",",
"loop",
")",
"self",
".",
"peername",
"=",
"sock",
".",
"getpeername",
"(",
")",
"return",
"sock",
"except",
"(",
"OSError",
",",
"SOCKSProtocolError",
")",
"as",
"e",
":",
"exception",
"=",
"e",
"# Don't close the socket because of an asyncio bug",
"# see https://github.com/kyuupichan/aiorpcX/issues/8",
"return",
"exception"
] |
Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure.
|
[
"Connect",
"to",
"the",
"proxy",
"and",
"perform",
"a",
"handshake",
"requesting",
"a",
"connection",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L300-L323
|
18,309
|
kyuupichan/aiorpcX
|
aiorpcx/socks.py
|
SOCKSProxy._connect
|
async def _connect(self, remote_addresses):
    '''Connect to the proxy and perform a handshake requesting a connection to each address in
    addresses.
    Return an (open_socket, remote_address) pair on success.
    '''
    assert remote_addresses
    exceptions = []
    for remote_address in remote_addresses:
        sock = await self._connect_one(remote_address)
        if isinstance(sock, socket.socket):
            return sock, remote_address
        exceptions.append(sock)
    strings = set(f'{exc!r}' for exc in exceptions)
    raise (exceptions[0] if len(strings) == 1 else
           OSError(f'multiple exceptions: {", ".join(strings)}'))
|
python
|
async def _connect(self, remote_addresses):
    '''Connect to the proxy and perform a handshake requesting a connection to each address in
    addresses.
    Return an (open_socket, remote_address) pair on success.
    '''
    assert remote_addresses
    exceptions = []
    for remote_address in remote_addresses:
        sock = await self._connect_one(remote_address)
        if isinstance(sock, socket.socket):
            return sock, remote_address
        exceptions.append(sock)
    strings = set(f'{exc!r}' for exc in exceptions)
    raise (exceptions[0] if len(strings) == 1 else
           OSError(f'multiple exceptions: {", ".join(strings)}'))
|
[
"async",
"def",
"_connect",
"(",
"self",
",",
"remote_addresses",
")",
":",
"assert",
"remote_addresses",
"exceptions",
"=",
"[",
"]",
"for",
"remote_address",
"in",
"remote_addresses",
":",
"sock",
"=",
"await",
"self",
".",
"_connect_one",
"(",
"remote_address",
")",
"if",
"isinstance",
"(",
"sock",
",",
"socket",
".",
"socket",
")",
":",
"return",
"sock",
",",
"remote_address",
"exceptions",
".",
"append",
"(",
"sock",
")",
"strings",
"=",
"set",
"(",
"f'{exc!r}'",
"for",
"exc",
"in",
"exceptions",
")",
"raise",
"(",
"exceptions",
"[",
"0",
"]",
"if",
"len",
"(",
"strings",
")",
"==",
"1",
"else",
"OSError",
"(",
"f'multiple exceptions: {\", \".join(strings)}'",
")",
")"
] |
Connect to the proxy and perform a handshake requesting a connection to each address in
addresses.
Return an (open_socket, remote_address) pair on success.
|
[
"Connect",
"to",
"the",
"proxy",
"and",
"perform",
"a",
"handshake",
"requesting",
"a",
"connection",
"to",
"each",
"address",
"in",
"addresses",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L325-L342
|
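Editor's note: how the row above collapses identical failures before raising; plain Python, checkable directly:

```python
exceptions = [OSError('refused'), OSError('refused')]
strings = set(f'{exc!r}' for exc in exceptions)
# One distinct repr: re-raise the single exception instead of an aggregate.
assert strings == {"OSError('refused')"} and len(strings) == 1
```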
18,310
|
kyuupichan/aiorpcX
|
aiorpcx/socks.py
|
SOCKSProxy._detect_proxy
|
async def _detect_proxy(self):
    '''Return True if it appears we can connect to a SOCKS proxy,
    otherwise False.
    '''
    if self.protocol is SOCKS4a:
        remote_address = NetAddress('www.apple.com', 80)
    else:
        remote_address = NetAddress('8.8.8.8', 53)
    sock = await self._connect_one(remote_address)
    if isinstance(sock, socket.socket):
        sock.close()
        return True
    # SOCKSFailure indicates something failed, but that we are likely talking to a
    # proxy
    return isinstance(sock, SOCKSFailure)
|
python
|
async def _detect_proxy(self):
    '''Return True if it appears we can connect to a SOCKS proxy,
    otherwise False.
    '''
    if self.protocol is SOCKS4a:
        remote_address = NetAddress('www.apple.com', 80)
    else:
        remote_address = NetAddress('8.8.8.8', 53)
    sock = await self._connect_one(remote_address)
    if isinstance(sock, socket.socket):
        sock.close()
        return True
    # SOCKSFailure indicates something failed, but that we are likely talking to a
    # proxy
    return isinstance(sock, SOCKSFailure)
|
[
"async",
"def",
"_detect_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"protocol",
"is",
"SOCKS4a",
":",
"remote_address",
"=",
"NetAddress",
"(",
"'www.apple.com'",
",",
"80",
")",
"else",
":",
"remote_address",
"=",
"NetAddress",
"(",
"'8.8.8.8'",
",",
"53",
")",
"sock",
"=",
"await",
"self",
".",
"_connect_one",
"(",
"remote_address",
")",
"if",
"isinstance",
"(",
"sock",
",",
"socket",
".",
"socket",
")",
":",
"sock",
".",
"close",
"(",
")",
"return",
"True",
"# SOCKSFailure indicates something failed, but that we are likely talking to a",
"# proxy",
"return",
"isinstance",
"(",
"sock",
",",
"SOCKSFailure",
")"
] |
Return True if it appears we can connect to a SOCKS proxy,
otherwise False.
|
[
"Return",
"True",
"if",
"it",
"appears",
"we",
"can",
"connect",
"to",
"a",
"SOCKS",
"proxy",
"otherwise",
"False",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L344-L360
|
18,311
|
kyuupichan/aiorpcX
|
aiorpcx/socks.py
|
SOCKSProxy.auto_detect_at_host
|
async def auto_detect_at_host(cls, host, ports, auth):
    '''Try to detect a SOCKS proxy on a host on one of the ports.
    Calls auto_detect_address for the ports in order. Returning a SOCKSProxy does not
    mean it is functioning - for example, it may have no network connectivity.
    If no proxy is detected return None.
    '''
    for port in ports:
        proxy = await cls.auto_detect_at_address(NetAddress(host, port), auth)
        if proxy:
            return proxy
    return None
|
python
|
async def auto_detect_at_host(cls, host, ports, auth):
    '''Try to detect a SOCKS proxy on a host on one of the ports.
    Calls auto_detect_address for the ports in order. Returning a SOCKSProxy does not
    mean it is functioning - for example, it may have no network connectivity.
    If no proxy is detected return None.
    '''
    for port in ports:
        proxy = await cls.auto_detect_at_address(NetAddress(host, port), auth)
        if proxy:
            return proxy
    return None
|
[
"async",
"def",
"auto_detect_at_host",
"(",
"cls",
",",
"host",
",",
"ports",
",",
"auth",
")",
":",
"for",
"port",
"in",
"ports",
":",
"proxy",
"=",
"await",
"cls",
".",
"auto_detect_at_address",
"(",
"NetAddress",
"(",
"host",
",",
"port",
")",
",",
"auth",
")",
"if",
"proxy",
":",
"return",
"proxy",
"return",
"None"
] |
Try to detect a SOCKS proxy on a host on one of the ports.
Calls auto_detect_address for the ports in order. Returning a SOCKSProxy does not
mean it is functioning - for example, it may have no network connectivity.
If no proxy is detected return None.
|
[
"Try",
"to",
"detect",
"a",
"SOCKS",
"proxy",
"on",
"a",
"host",
"on",
"one",
"of",
"the",
"ports",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L380-L393
|
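Editor's note: a hedged usage sketch (the classmethod decorator is elided by this dataset's extraction; the ports are the common Tor/SOCKS defaults, chosen purely for illustration):

```python
import asyncio
from aiorpcx import SOCKSProxy

async def main():
    # Probe the ports in order; None means nothing answered like a proxy.
    proxy = await SOCKSProxy.auto_detect_at_host('localhost', [9050, 1080], auth=None)
    print(proxy or 'no proxy found')

asyncio.run(main())
```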
18,312
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
Connector.create_connection
|
async def create_connection(self):
    '''Initiate a connection.'''
    connector = self.proxy or self.loop
    return await connector.create_connection(
        self.session_factory, self.host, self.port, **self.kwargs)
|
python
|
async def create_connection(self):
    '''Initiate a connection.'''
    connector = self.proxy or self.loop
    return await connector.create_connection(
        self.session_factory, self.host, self.port, **self.kwargs)
|
[
"async",
"def",
"create_connection",
"(",
"self",
")",
":",
"connector",
"=",
"self",
".",
"proxy",
"or",
"self",
".",
"loop",
"return",
"await",
"connector",
".",
"create_connection",
"(",
"self",
".",
"session_factory",
",",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"*",
"*",
"self",
".",
"kwargs",
")"
] |
Initiate a connection.
|
[
"Initiate",
"a",
"connection",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L73-L77
|
18,313
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
SessionBase.data_received
|
def data_received(self, framed_message):
    '''Called by asyncio when a message comes in.'''
    if self.verbosity >= 4:
        self.logger.debug(f'Received framed message {framed_message}')
    self.recv_size += len(framed_message)
    self.bump_cost(len(framed_message) * self.bw_cost_per_byte)
    self.framer.received_bytes(framed_message)
|
python
|
def data_received(self, framed_message):
    '''Called by asyncio when a message comes in.'''
    if self.verbosity >= 4:
        self.logger.debug(f'Received framed message {framed_message}')
    self.recv_size += len(framed_message)
    self.bump_cost(len(framed_message) * self.bw_cost_per_byte)
    self.framer.received_bytes(framed_message)
|
[
"def",
"data_received",
"(",
"self",
",",
"framed_message",
")",
":",
"if",
"self",
".",
"verbosity",
">=",
"4",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f'Received framed message {framed_message}'",
")",
"self",
".",
"recv_size",
"+=",
"len",
"(",
"framed_message",
")",
"self",
".",
"bump_cost",
"(",
"len",
"(",
"framed_message",
")",
"*",
"self",
".",
"bw_cost_per_byte",
")",
"self",
".",
"framer",
".",
"received_bytes",
"(",
"framed_message",
")"
] |
Called by asyncio when a message comes in.
|
[
"Called",
"by",
"asyncio",
"when",
"a",
"message",
"comes",
"in",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L224-L230
|
18,314
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
SessionBase.pause_writing
|
def pause_writing(self):
    '''Transport calls when the send buffer is full.'''
    if not self.is_closing():
        self._can_send.clear()
        self.transport.pause_reading()
|
python
|
def pause_writing(self):
    '''Transport calls when the send buffer is full.'''
    if not self.is_closing():
        self._can_send.clear()
        self.transport.pause_reading()
|
[
"def",
"pause_writing",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_closing",
"(",
")",
":",
"self",
".",
"_can_send",
".",
"clear",
"(",
")",
"self",
".",
"transport",
".",
"pause_reading",
"(",
")"
] |
Transport calls when the send buffer is full.
|
[
"Transport",
"calls",
"when",
"the",
"send",
"buffer",
"is",
"full",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L232-L236
|
18,315
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
SessionBase.resume_writing
|
def resume_writing(self):
    '''Transport calls when the send buffer has room.'''
    if not self._can_send.is_set():
        self._can_send.set()
        self.transport.resume_reading()
|
python
|
def resume_writing(self):
    '''Transport calls when the send buffer has room.'''
    if not self._can_send.is_set():
        self._can_send.set()
        self.transport.resume_reading()
|
[
"def",
"resume_writing",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_can_send",
".",
"is_set",
"(",
")",
":",
"self",
".",
"_can_send",
".",
"set",
"(",
")",
"self",
".",
"transport",
".",
"resume_reading",
"(",
")"
] |
Transport calls when the send buffer has room.
|
[
"Transport",
"calls",
"when",
"the",
"send",
"buffer",
"has",
"room",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L238-L242
|
18,316
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
SessionBase.connection_made
|
def connection_made(self, transport):
    '''Called by asyncio when a connection is established.
    Derived classes overriding this method must call this first.'''
    self.transport = transport
    # If the Socks proxy was used then _proxy and _remote_address are already set
    if self._proxy is None:
        # This would throw if called on a closed SSL transport. Fixed in asyncio in
        # Python 3.6.1 and 3.5.4
        peername = transport.get_extra_info('peername')
        self._remote_address = NetAddress(peername[0], peername[1])
    self._task = spawn_sync(self._process_messages(), loop=self.loop)
|
python
|
def connection_made(self, transport):
    '''Called by asyncio when a connection is established.
    Derived classes overriding this method must call this first.'''
    self.transport = transport
    # If the Socks proxy was used then _proxy and _remote_address are already set
    if self._proxy is None:
        # This would throw if called on a closed SSL transport. Fixed in asyncio in
        # Python 3.6.1 and 3.5.4
        peername = transport.get_extra_info('peername')
        self._remote_address = NetAddress(peername[0], peername[1])
    self._task = spawn_sync(self._process_messages(), loop=self.loop)
|
[
"def",
"connection_made",
"(",
"self",
",",
"transport",
")",
":",
"self",
".",
"transport",
"=",
"transport",
"# If the Socks proxy was used then _proxy and _remote_address are already set",
"if",
"self",
".",
"_proxy",
"is",
"None",
":",
"# This would throw if called on a closed SSL transport. Fixed in asyncio in",
"# Python 3.6.1 and 3.5.4",
"peername",
"=",
"transport",
".",
"get_extra_info",
"(",
"'peername'",
")",
"self",
".",
"_remote_address",
"=",
"NetAddress",
"(",
"peername",
"[",
"0",
"]",
",",
"peername",
"[",
"1",
"]",
")",
"self",
".",
"_task",
"=",
"spawn_sync",
"(",
"self",
".",
"_process_messages",
"(",
")",
",",
"loop",
"=",
"self",
".",
"loop",
")"
] |
Called by asyncio when a connection is established.
Derived classes overriding this method must call this first.
|
[
"Called",
"by",
"asyncio",
"when",
"a",
"connection",
"is",
"established",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L244-L255
|
18,317
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
SessionBase.connection_lost
|
def connection_lost(self, exc):
    '''Called by asyncio when the connection closes.
    Tear down things done in connection_made.'''
    # Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246
    if self.transport:
        self.transport = None
        self.closed_event.set()
        # Release waiting tasks
        self._can_send.set()
        # Cancelling directly leads to self-cancellation problems for member
        # functions await-ing self.close()
        self.loop.call_soon(self._task.cancel)
|
python
|
def connection_lost(self, exc):
    '''Called by asyncio when the connection closes.
    Tear down things done in connection_made.'''
    # Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246
    if self.transport:
        self.transport = None
        self.closed_event.set()
        # Release waiting tasks
        self._can_send.set()
        # Cancelling directly leads to self-cancellation problems for member
        # functions await-ing self.close()
        self.loop.call_soon(self._task.cancel)
|
[
"def",
"connection_lost",
"(",
"self",
",",
"exc",
")",
":",
"# Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246",
"if",
"self",
".",
"transport",
":",
"self",
".",
"transport",
"=",
"None",
"self",
".",
"closed_event",
".",
"set",
"(",
")",
"# Release waiting tasks",
"self",
".",
"_can_send",
".",
"set",
"(",
")",
"# Cancelling directly leads to self-cancellation problems for member",
"# functions await-ing self.close()",
"self",
".",
"loop",
".",
"call_soon",
"(",
"self",
".",
"_task",
".",
"cancel",
")"
] |
Called by asyncio when the connection closes.
Tear down things done in connection_made.
|
[
"Called",
"by",
"asyncio",
"when",
"the",
"connection",
"closes",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L257-L269
|
18,318
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
SessionBase.recalc_concurrency
|
def recalc_concurrency(self):
    '''Call to recalculate sleeps and concurrency for the session. Called automatically if
    cost has drifted significantly. Otherwise can be called at regular intervals if
    desired.
    '''
    # Refund resource usage proportionally to elapsed time; the bump passed is negative
    now = time.time()
    self.cost = max(0, self.cost - (now - self._cost_time) * self.cost_decay_per_sec)
    self._cost_time = now
    self._cost_last = self.cost
    # Setting cost_hard_limit <= 0 means to not limit concurrency
    value = self._incoming_concurrency.max_concurrent
    cost_soft_range = self.cost_hard_limit - self.cost_soft_limit
    if cost_soft_range <= 0:
        return
    cost = self.cost + self.extra_cost()
    self._cost_fraction = max(0.0, (cost - self.cost_soft_limit) / cost_soft_range)
    target = max(0, ceil((1.0 - self._cost_fraction) * self.initial_concurrent))
    if abs(target - value) > 1:
        self.logger.info(f'changing task concurrency from {value} to {target}')
    self._incoming_concurrency.set_target(target)
|
python
|
def recalc_concurrency(self):
    '''Call to recalculate sleeps and concurrency for the session. Called automatically if
    cost has drifted significantly. Otherwise can be called at regular intervals if
    desired.
    '''
    # Refund resource usage proportionally to elapsed time; the bump passed is negative
    now = time.time()
    self.cost = max(0, self.cost - (now - self._cost_time) * self.cost_decay_per_sec)
    self._cost_time = now
    self._cost_last = self.cost
    # Setting cost_hard_limit <= 0 means to not limit concurrency
    value = self._incoming_concurrency.max_concurrent
    cost_soft_range = self.cost_hard_limit - self.cost_soft_limit
    if cost_soft_range <= 0:
        return
    cost = self.cost + self.extra_cost()
    self._cost_fraction = max(0.0, (cost - self.cost_soft_limit) / cost_soft_range)
    target = max(0, ceil((1.0 - self._cost_fraction) * self.initial_concurrent))
    if abs(target - value) > 1:
        self.logger.info(f'changing task concurrency from {value} to {target}')
    self._incoming_concurrency.set_target(target)
|
[
"def",
"recalc_concurrency",
"(",
"self",
")",
":",
"# Refund resource usage proportionally to elapsed time; the bump passed is negative",
"now",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"cost",
"=",
"max",
"(",
"0",
",",
"self",
".",
"cost",
"-",
"(",
"now",
"-",
"self",
".",
"_cost_time",
")",
"*",
"self",
".",
"cost_decay_per_sec",
")",
"self",
".",
"_cost_time",
"=",
"now",
"self",
".",
"_cost_last",
"=",
"self",
".",
"cost",
"# Setting cost_hard_limit <= 0 means to not limit concurrency",
"value",
"=",
"self",
".",
"_incoming_concurrency",
".",
"max_concurrent",
"cost_soft_range",
"=",
"self",
".",
"cost_hard_limit",
"-",
"self",
".",
"cost_soft_limit",
"if",
"cost_soft_range",
"<=",
"0",
":",
"return",
"cost",
"=",
"self",
".",
"cost",
"+",
"self",
".",
"extra_cost",
"(",
")",
"self",
".",
"_cost_fraction",
"=",
"max",
"(",
"0.0",
",",
"(",
"cost",
"-",
"self",
".",
"cost_soft_limit",
")",
"/",
"cost_soft_range",
")",
"target",
"=",
"max",
"(",
"0",
",",
"ceil",
"(",
"(",
"1.0",
"-",
"self",
".",
"_cost_fraction",
")",
"*",
"self",
".",
"initial_concurrent",
")",
")",
"if",
"abs",
"(",
"target",
"-",
"value",
")",
">",
"1",
":",
"self",
".",
"logger",
".",
"info",
"(",
"f'changing task concurrency from {value} to {target}'",
")",
"self",
".",
"_incoming_concurrency",
".",
"set_target",
"(",
"target",
")"
] |
Call to recalculate sleeps and concurrency for the session. Called automatically if
cost has drifted significantly. Otherwise can be called at regular intervals if
desired.
|
[
"Call",
"to",
"recalculate",
"sleeps",
"and",
"concurrency",
"for",
"the",
"session",
".",
"Called",
"automatically",
"if",
"cost",
"has",
"drifted",
"significantly",
".",
"Otherwise",
"can",
"be",
"called",
"at",
"regular",
"intervals",
"if",
"desired",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L282-L305
|
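Editor's note: a worked example of the retargeting arithmetic above (the limits are illustrative, not library defaults):

```python
from math import ceil

cost_soft_limit, cost_hard_limit = 1000, 2000    # soft range = 1000
initial_concurrent = 20
cost = 1300                                      # decayed cost + extra_cost()

cost_fraction = max(0.0, (cost - cost_soft_limit) / (cost_hard_limit - cost_soft_limit))
target = max(0, ceil((1.0 - cost_fraction) * initial_concurrent))
assert cost_fraction == 0.3 and target == 14     # 30% into the soft range -> 70% concurrency
```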
18,319
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
SessionBase.close
|
async def close(self, *, force_after=30):
    '''Close the connection and return when closed.'''
    if self.transport:
        self.transport.close()
    try:
        async with timeout_after(force_after):
            await self.closed_event.wait()
    except TaskTimeout:
        self.abort()
        await self.closed_event.wait()
|
python
|
async def close(self, *, force_after=30):
    '''Close the connection and return when closed.'''
    if self.transport:
        self.transport.close()
    try:
        async with timeout_after(force_after):
            await self.closed_event.wait()
    except TaskTimeout:
        self.abort()
        await self.closed_event.wait()
|
[
"async",
"def",
"close",
"(",
"self",
",",
"*",
",",
"force_after",
"=",
"30",
")",
":",
"if",
"self",
".",
"transport",
":",
"self",
".",
"transport",
".",
"close",
"(",
")",
"try",
":",
"async",
"with",
"timeout_after",
"(",
"force_after",
")",
":",
"await",
"self",
".",
"closed_event",
".",
"wait",
"(",
")",
"except",
"TaskTimeout",
":",
"self",
".",
"abort",
"(",
")",
"await",
"self",
".",
"closed_event",
".",
"wait",
"(",
")"
] |
Close the connection and return when closed.
|
[
"Close",
"the",
"connection",
"and",
"return",
"when",
"closed",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L342-L351
|
18,320
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
RPCSession.send_request
|
async def send_request(self, method, args=()):
    '''Send an RPC request over the network.'''
    message, event = self.connection.send_request(Request(method, args))
    return await self._send_concurrent(message, event, 1)
|
python
|
async def send_request(self, method, args=()):
    '''Send an RPC request over the network.'''
    message, event = self.connection.send_request(Request(method, args))
    return await self._send_concurrent(message, event, 1)
|
[
"async",
"def",
"send_request",
"(",
"self",
",",
"method",
",",
"args",
"=",
"(",
")",
")",
":",
"message",
",",
"event",
"=",
"self",
".",
"connection",
".",
"send_request",
"(",
"Request",
"(",
"method",
",",
"args",
")",
")",
"return",
"await",
"self",
".",
"_send_concurrent",
"(",
"message",
",",
"event",
",",
"1",
")"
] |
Send an RPC request over the network.
|
[
"Send",
"an",
"RPC",
"request",
"over",
"the",
"network",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L631-L634
|
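Editor's note: a minimal client sketch for this API (connect_rs is aiorpcx's stream connector; host, port, and method names are illustrative):

```python
import asyncio
from aiorpcx import connect_rs

async def main():
    async with connect_rs('localhost', 8888) as session:
        result = await session.send_request('server.version', ['1.4'])
        print(result)

asyncio.run(main())
```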
18,321
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
RPCSession.send_notification
|
async def send_notification(self, method, args=()):
    '''Send an RPC notification over the network.'''
    message = self.connection.send_notification(Notification(method, args))
    await self._send_message(message)
|
python
|
async def send_notification(self, method, args=()):
    '''Send an RPC notification over the network.'''
    message = self.connection.send_notification(Notification(method, args))
    await self._send_message(message)
|
[
"async",
"def",
"send_notification",
"(",
"self",
",",
"method",
",",
"args",
"=",
"(",
")",
")",
":",
"message",
"=",
"self",
".",
"connection",
".",
"send_notification",
"(",
"Notification",
"(",
"method",
",",
"args",
")",
")",
"await",
"self",
".",
"_send_message",
"(",
"message",
")"
] |
Send an RPC notification over the network.
|
[
"Send",
"an",
"RPC",
"notification",
"over",
"the",
"network",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L636-L639
|
18,322
|
kyuupichan/aiorpcX
|
aiorpcx/session.py
|
Server.close
|
async def close(self):
    '''Close the listening socket. This does not close any ServerSession
    objects created to handle incoming connections.
    '''
    if self.server:
        self.server.close()
        await self.server.wait_closed()
        self.server = None
|
python
|
async def close(self):
    '''Close the listening socket. This does not close any ServerSession
    objects created to handle incoming connections.
    '''
    if self.server:
        self.server.close()
        await self.server.wait_closed()
        self.server = None
|
[
"async",
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"server",
":",
"self",
".",
"server",
".",
"close",
"(",
")",
"await",
"self",
".",
"server",
".",
"wait_closed",
"(",
")",
"self",
".",
"server",
"=",
"None"
] |
Close the listening socket. This does not close any ServerSession
objects created to handle incoming connections.
|
[
"Close",
"the",
"listening",
"socket",
".",
"This",
"does",
"not",
"close",
"any",
"ServerSession",
"objects",
"created",
"to",
"handle",
"incoming",
"connections",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L674-L681
|
18,323
|
kyuupichan/aiorpcX
|
aiorpcx/jsonrpc.py
|
JSONRPC._message_to_payload
|
def _message_to_payload(cls, message):
    '''Returns a Python object or a ProtocolError.'''
    try:
        return json.loads(message.decode())
    except UnicodeDecodeError:
        message = 'messages must be encoded in UTF-8'
    except json.JSONDecodeError:
        message = 'invalid JSON'
    raise cls._error(cls.PARSE_ERROR, message, True, None)
|
python
|
def _message_to_payload(cls, message):
    '''Returns a Python object or a ProtocolError.'''
    try:
        return json.loads(message.decode())
    except UnicodeDecodeError:
        message = 'messages must be encoded in UTF-8'
    except json.JSONDecodeError:
        message = 'invalid JSON'
    raise cls._error(cls.PARSE_ERROR, message, True, None)
|
[
"def",
"_message_to_payload",
"(",
"cls",
",",
"message",
")",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"message",
".",
"decode",
"(",
")",
")",
"except",
"UnicodeDecodeError",
":",
"message",
"=",
"'messages must be encoded in UTF-8'",
"except",
"json",
".",
"JSONDecodeError",
":",
"message",
"=",
"'invalid JSON'",
"raise",
"cls",
".",
"_error",
"(",
"cls",
".",
"PARSE_ERROR",
",",
"message",
",",
"True",
",",
"None",
")"
] |
Returns a Python object or a ProtocolError.
|
[
"Returns",
"a",
"Python",
"object",
"or",
"a",
"ProtocolError",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L223-L231
|
18,324
|
kyuupichan/aiorpcX
|
aiorpcx/jsonrpc.py
|
JSONRPC.batch_message
|
def batch_message(cls, batch, request_ids):
    '''Convert a request Batch to a message.'''
    assert isinstance(batch, Batch)
    if not cls.allow_batches:
        raise ProtocolError.invalid_request(
            'protocol does not permit batches')
    id_iter = iter(request_ids)
    rm = cls.request_message
    nm = cls.notification_message
    parts = (rm(request, next(id_iter)) if isinstance(request, Request)
             else nm(request) for request in batch)
    return cls.batch_message_from_parts(parts)
|
python
|
def batch_message(cls, batch, request_ids):
    '''Convert a request Batch to a message.'''
    assert isinstance(batch, Batch)
    if not cls.allow_batches:
        raise ProtocolError.invalid_request(
            'protocol does not permit batches')
    id_iter = iter(request_ids)
    rm = cls.request_message
    nm = cls.notification_message
    parts = (rm(request, next(id_iter)) if isinstance(request, Request)
             else nm(request) for request in batch)
    return cls.batch_message_from_parts(parts)
|
[
"def",
"batch_message",
"(",
"cls",
",",
"batch",
",",
"request_ids",
")",
":",
"assert",
"isinstance",
"(",
"batch",
",",
"Batch",
")",
"if",
"not",
"cls",
".",
"allow_batches",
":",
"raise",
"ProtocolError",
".",
"invalid_request",
"(",
"'protocol does not permit batches'",
")",
"id_iter",
"=",
"iter",
"(",
"request_ids",
")",
"rm",
"=",
"cls",
".",
"request_message",
"nm",
"=",
"cls",
".",
"notification_message",
"parts",
"=",
"(",
"rm",
"(",
"request",
",",
"next",
"(",
"id_iter",
")",
")",
"if",
"isinstance",
"(",
"request",
",",
"Request",
")",
"else",
"nm",
"(",
"request",
")",
"for",
"request",
"in",
"batch",
")",
"return",
"cls",
".",
"batch_message_from_parts",
"(",
"parts",
")"
] |
Convert a request Batch to a message.
|
[
"Convert",
"a",
"request",
"Batch",
"to",
"a",
"message",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L305-L316
|
18,325
|
kyuupichan/aiorpcX
|
aiorpcx/jsonrpc.py
|
JSONRPC.batch_message_from_parts
|
def batch_message_from_parts(cls, messages):
    '''Convert messages, one per batch item, into a batch message. At
    least one message must be passed.
    '''
    # Comma-separate the messages and wrap the lot in square brackets
    middle = b', '.join(messages)
    if not middle:
        raise ProtocolError.empty_batch()
    return b''.join([b'[', middle, b']'])
|
python
|
def batch_message_from_parts(cls, messages):
    '''Convert messages, one per batch item, into a batch message. At
    least one message must be passed.
    '''
    # Comma-separate the messages and wrap the lot in square brackets
    middle = b', '.join(messages)
    if not middle:
        raise ProtocolError.empty_batch()
    return b''.join([b'[', middle, b']'])
|
[
"def",
"batch_message_from_parts",
"(",
"cls",
",",
"messages",
")",
":",
"# Comma-separate the messages and wrap the lot in square brackets",
"middle",
"=",
"b', '",
".",
"join",
"(",
"messages",
")",
"if",
"not",
"middle",
":",
"raise",
"ProtocolError",
".",
"empty_batch",
"(",
")",
"return",
"b''",
".",
"join",
"(",
"[",
"b'['",
",",
"middle",
",",
"b']'",
"]",
")"
] |
Convert messages, one per batch item, into a batch message. At
least one message must be passed.
|
[
"Convert",
"messages",
"one",
"per",
"batch",
"item",
"into",
"a",
"batch",
"message",
".",
"At",
"least",
"one",
"message",
"must",
"be",
"passed",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L319-L327
|
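Editor's note: the framing rule above reduced to one checkable line:

```python
parts = [b'{"id": 1}', b'{"id": 2}']
assert b''.join([b'[', b', '.join(parts), b']']) == b'[{"id": 1}, {"id": 2}]'
```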
18,326
|
kyuupichan/aiorpcX
|
aiorpcx/jsonrpc.py
|
JSONRPC.encode_payload
|
def encode_payload(cls, payload):
    '''Encode a Python object as JSON and convert it to bytes.'''
    try:
        return json.dumps(payload).encode()
    except TypeError:
        msg = f'JSON payload encoding error: {payload}'
        raise ProtocolError(cls.INTERNAL_ERROR, msg) from None
|
python
|
def encode_payload(cls, payload):
    '''Encode a Python object as JSON and convert it to bytes.'''
    try:
        return json.dumps(payload).encode()
    except TypeError:
        msg = f'JSON payload encoding error: {payload}'
        raise ProtocolError(cls.INTERNAL_ERROR, msg) from None
|
[
"def",
"encode_payload",
"(",
"cls",
",",
"payload",
")",
":",
"try",
":",
"return",
"json",
".",
"dumps",
"(",
"payload",
")",
".",
"encode",
"(",
")",
"except",
"TypeError",
":",
"msg",
"=",
"f'JSON payload encoding error: {payload}'",
"raise",
"ProtocolError",
"(",
"cls",
".",
"INTERNAL_ERROR",
",",
"msg",
")",
"from",
"None"
] |
Encode a Python object as JSON and convert it to bytes.
|
[
"Encode",
"a",
"Python",
"object",
"as",
"JSON",
"and",
"convert",
"it",
"to",
"bytes",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L330-L336
|
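Editor's note: the happy paths of encode_payload (this row) and _message_to_payload (row 18,323) form a plain-json round trip:

```python
import json

payload = {'jsonrpc': '2.0', 'method': 'ping', 'id': 0}
message = json.dumps(payload).encode()            # what encode_payload() returns
assert json.loads(message.decode()) == payload    # what _message_to_payload() parses
```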
18,327
|
kyuupichan/aiorpcX
|
aiorpcx/jsonrpc.py
|
JSONRPCAutoDetect.detect_protocol
|
def detect_protocol(cls, message):
    '''Attempt to detect the protocol from the message.'''
    main = cls._message_to_payload(message)

    def protocol_for_payload(payload):
        if not isinstance(payload, dict):
            return JSONRPCLoose   # Will error
        # Obey an explicit "jsonrpc"
        version = payload.get('jsonrpc')
        if version == '2.0':
            return JSONRPCv2
        if version == '1.0':
            return JSONRPCv1
        # Now to decide between JSONRPCLoose and JSONRPCv1 if possible
        if 'result' in payload and 'error' in payload:
            return JSONRPCv1
        return JSONRPCLoose

    if isinstance(main, list):
        parts = set(protocol_for_payload(payload) for payload in main)
        # If all same protocol, return it
        if len(parts) == 1:
            return parts.pop()
        # If strict protocol detected, return it, preferring JSONRPCv2.
        # This means a batch of JSONRPCv1 will fail
        for protocol in (JSONRPCv2, JSONRPCv1):
            if protocol in parts:
                return protocol
        # Will error if no parts
        return JSONRPCLoose
    return protocol_for_payload(main)
|
python
|
def detect_protocol(cls, message):
    '''Attempt to detect the protocol from the message.'''
    main = cls._message_to_payload(message)

    def protocol_for_payload(payload):
        if not isinstance(payload, dict):
            return JSONRPCLoose   # Will error
        # Obey an explicit "jsonrpc"
        version = payload.get('jsonrpc')
        if version == '2.0':
            return JSONRPCv2
        if version == '1.0':
            return JSONRPCv1
        # Now to decide between JSONRPCLoose and JSONRPCv1 if possible
        if 'result' in payload and 'error' in payload:
            return JSONRPCv1
        return JSONRPCLoose

    if isinstance(main, list):
        parts = set(protocol_for_payload(payload) for payload in main)
        # If all same protocol, return it
        if len(parts) == 1:
            return parts.pop()
        # If strict protocol detected, return it, preferring JSONRPCv2.
        # This means a batch of JSONRPCv1 will fail
        for protocol in (JSONRPCv2, JSONRPCv1):
            if protocol in parts:
                return protocol
        # Will error if no parts
        return JSONRPCLoose
    return protocol_for_payload(main)
|
[
"def",
"detect_protocol",
"(",
"cls",
",",
"message",
")",
":",
"main",
"=",
"cls",
".",
"_message_to_payload",
"(",
"message",
")",
"def",
"protocol_for_payload",
"(",
"payload",
")",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"dict",
")",
":",
"return",
"JSONRPCLoose",
"# Will error",
"# Obey an explicit \"jsonrpc\"",
"version",
"=",
"payload",
".",
"get",
"(",
"'jsonrpc'",
")",
"if",
"version",
"==",
"'2.0'",
":",
"return",
"JSONRPCv2",
"if",
"version",
"==",
"'1.0'",
":",
"return",
"JSONRPCv1",
"# Now to decide between JSONRPCLoose and JSONRPCv1 if possible",
"if",
"'result'",
"in",
"payload",
"and",
"'error'",
"in",
"payload",
":",
"return",
"JSONRPCv1",
"return",
"JSONRPCLoose",
"if",
"isinstance",
"(",
"main",
",",
"list",
")",
":",
"parts",
"=",
"set",
"(",
"protocol_for_payload",
"(",
"payload",
")",
"for",
"payload",
"in",
"main",
")",
"# If all same protocol, return it",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"return",
"parts",
".",
"pop",
"(",
")",
"# If strict protocol detected, return it, preferring JSONRPCv2.",
"# This means a batch of JSONRPCv1 will fail",
"for",
"protocol",
"in",
"(",
"JSONRPCv2",
",",
"JSONRPCv1",
")",
":",
"if",
"protocol",
"in",
"parts",
":",
"return",
"protocol",
"# Will error if no parts",
"return",
"JSONRPCLoose",
"return",
"protocol_for_payload",
"(",
"main",
")"
] |
Attempt to detect the protocol from the message.
|
[
"Attempt",
"to",
"detect",
"the",
"protocol",
"from",
"the",
"message",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L544-L576
|
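A minimal, self-contained sketch of the per-payload sniffing rule above, with empty stand-in classes for the three protocol flavours:

import json

class JSONRPCv1: pass
class JSONRPCv2: pass
class JSONRPCLoose: pass

def protocol_for_payload(payload):
    if not isinstance(payload, dict):
        return JSONRPCLoose           # will error downstream
    version = payload.get('jsonrpc')  # obey an explicit version field
    if version == '2.0':
        return JSONRPCv2
    if version == '1.0':
        return JSONRPCv1
    # v1 responses carry both keys; otherwise stay loose
    if 'result' in payload and 'error' in payload:
        return JSONRPCv1
    return JSONRPCLoose

assert protocol_for_payload(json.loads('{"jsonrpc": "2.0", "method": "ping"}')) is JSONRPCv2
assert protocol_for_payload({'result': 1, 'error': None, 'id': 0}) is JSONRPCv1
assert protocol_for_payload([1, 2]) is JSONRPCLoose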
18,328
|
kyuupichan/aiorpcX
|
aiorpcx/jsonrpc.py
|
JSONRPCConnection.receive_message
|
def receive_message(self, message):
'''Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error.
'''
if self._protocol is JSONRPCAutoDetect:
self._protocol = JSONRPCAutoDetect.detect_protocol(message)
try:
item, request_id = self._protocol.message_to_item(message)
except ProtocolError as e:
if e.response_msg_id is not id:
return self._receive_response(e, e.response_msg_id)
raise
if isinstance(item, Request):
item.send_result = partial(self._send_result, request_id)
return [item]
if isinstance(item, Notification):
return [item]
if isinstance(item, Response):
return self._receive_response(item.result, request_id)
assert isinstance(item, list)
if all(isinstance(payload, dict) and ('result' in payload or 'error' in payload)
for payload in item):
return self._receive_response_batch(item)
else:
return self._receive_request_batch(item)
|
python
|
def receive_message(self, message):
'''Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error.
'''
if self._protocol is JSONRPCAutoDetect:
self._protocol = JSONRPCAutoDetect.detect_protocol(message)
try:
item, request_id = self._protocol.message_to_item(message)
except ProtocolError as e:
if e.response_msg_id is not id:
return self._receive_response(e, e.response_msg_id)
raise
if isinstance(item, Request):
item.send_result = partial(self._send_result, request_id)
return [item]
if isinstance(item, Notification):
return [item]
if isinstance(item, Response):
return self._receive_response(item.result, request_id)
assert isinstance(item, list)
if all(isinstance(payload, dict) and ('result' in payload or 'error' in payload)
for payload in item):
return self._receive_response_batch(item)
else:
return self._receive_request_batch(item)
|
[
"def",
"receive_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"_protocol",
"is",
"JSONRPCAutoDetect",
":",
"self",
".",
"_protocol",
"=",
"JSONRPCAutoDetect",
".",
"detect_protocol",
"(",
"message",
")",
"try",
":",
"item",
",",
"request_id",
"=",
"self",
".",
"_protocol",
".",
"message_to_item",
"(",
"message",
")",
"except",
"ProtocolError",
"as",
"e",
":",
"if",
"e",
".",
"response_msg_id",
"is",
"not",
"id",
":",
"return",
"self",
".",
"_receive_response",
"(",
"e",
",",
"e",
".",
"response_msg_id",
")",
"raise",
"if",
"isinstance",
"(",
"item",
",",
"Request",
")",
":",
"item",
".",
"send_result",
"=",
"partial",
"(",
"self",
".",
"_send_result",
",",
"request_id",
")",
"return",
"[",
"item",
"]",
"if",
"isinstance",
"(",
"item",
",",
"Notification",
")",
":",
"return",
"[",
"item",
"]",
"if",
"isinstance",
"(",
"item",
",",
"Response",
")",
":",
"return",
"self",
".",
"_receive_response",
"(",
"item",
".",
"result",
",",
"request_id",
")",
"assert",
"isinstance",
"(",
"item",
",",
"list",
")",
"if",
"all",
"(",
"isinstance",
"(",
"payload",
",",
"dict",
")",
"and",
"(",
"'result'",
"in",
"payload",
"or",
"'error'",
"in",
"payload",
")",
"for",
"payload",
"in",
"item",
")",
":",
"return",
"self",
".",
"_receive_response_batch",
"(",
"item",
")",
"else",
":",
"return",
"self",
".",
"_receive_request_batch",
"(",
"item",
")"
] |
Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error.
|
[
"Call",
"with",
"an",
"unframed",
"message",
"received",
"from",
"the",
"network",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L706-L737
|
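The final branch above decides whether a batch is a response batch or a request batch; a sketch of that predicate (note that all() over an empty iterable is True, so an empty batch would classify as responses):

def is_response_batch(item):
    return all(isinstance(payload, dict) and ('result' in payload or 'error' in payload)
               for payload in item)

assert is_response_batch([{'result': 1, 'id': 1}, {'error': {}, 'id': 2}])
assert not is_response_batch([{'method': 'ping', 'id': 3}])
assert is_response_batch([])  # vacuously true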
18,329
|
kyuupichan/aiorpcX
|
aiorpcx/jsonrpc.py
|
JSONRPCConnection.cancel_pending_requests
|
def cancel_pending_requests(self):
'''Cancel all pending requests.'''
exception = CancelledError()
for _request, event in self._requests.values():
event.result = exception
event.set()
self._requests.clear()
|
python
|
def cancel_pending_requests(self):
'''Cancel all pending requests.'''
exception = CancelledError()
for _request, event in self._requests.values():
event.result = exception
event.set()
self._requests.clear()
|
[
"def",
"cancel_pending_requests",
"(",
"self",
")",
":",
"exception",
"=",
"CancelledError",
"(",
")",
"for",
"_request",
",",
"event",
"in",
"self",
".",
"_requests",
".",
"values",
"(",
")",
":",
"event",
".",
"result",
"=",
"exception",
"event",
".",
"set",
"(",
")",
"self",
".",
"_requests",
".",
"clear",
"(",
")"
] |
Cancel all pending requests.
|
[
"Cancel",
"all",
"pending",
"requests",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L739-L745
|
18,330
|
kyuupichan/aiorpcX
|
aiorpcx/util.py
|
is_valid_hostname
|
def is_valid_hostname(hostname):
'''Return True if hostname is valid, otherwise False.'''
if not isinstance(hostname, str):
raise TypeError('hostname must be a string')
# strip exactly one dot from the right, if present
if hostname and hostname[-1] == ".":
hostname = hostname[:-1]
if not hostname or len(hostname) > 253:
return False
labels = hostname.split('.')
    # the TLD must not be all-numeric
if re.match(NUMERIC_REGEX, labels[-1]):
return False
return all(LABEL_REGEX.match(label) for label in labels)
|
python
|
def is_valid_hostname(hostname):
'''Return True if hostname is valid, otherwise False.'''
if not isinstance(hostname, str):
raise TypeError('hostname must be a string')
# strip exactly one dot from the right, if present
if hostname and hostname[-1] == ".":
hostname = hostname[:-1]
if not hostname or len(hostname) > 253:
return False
labels = hostname.split('.')
    # the TLD must not be all-numeric
if re.match(NUMERIC_REGEX, labels[-1]):
return False
return all(LABEL_REGEX.match(label) for label in labels)
|
[
"def",
"is_valid_hostname",
"(",
"hostname",
")",
":",
"if",
"not",
"isinstance",
"(",
"hostname",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'hostname must be a string'",
")",
"# strip exactly one dot from the right, if present",
"if",
"hostname",
"and",
"hostname",
"[",
"-",
"1",
"]",
"==",
"\".\"",
":",
"hostname",
"=",
"hostname",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"hostname",
"or",
"len",
"(",
"hostname",
")",
">",
"253",
":",
"return",
"False",
"labels",
"=",
"hostname",
".",
"split",
"(",
"'.'",
")",
"# the TLD must be not all-numeric",
"if",
"re",
".",
"match",
"(",
"NUMERIC_REGEX",
",",
"labels",
"[",
"-",
"1",
"]",
")",
":",
"return",
"False",
"return",
"all",
"(",
"LABEL_REGEX",
".",
"match",
"(",
"label",
")",
"for",
"label",
"in",
"labels",
")"
] |
Return True if hostname is valid, otherwise False.
|
[
"Return",
"True",
"if",
"hostname",
"is",
"valid",
"otherwise",
"False",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L47-L60
|
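To exercise the checks above, here is a self-contained copy with plausible stand-ins for the two module-level patterns (the real NUMERIC_REGEX and LABEL_REGEX definitions live in aiorpcx/util.py and may differ):

import re

NUMERIC_REGEX = re.compile('[0-9]+$')  # assumed: TLD of digits only
LABEL_REGEX = re.compile('^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$', re.IGNORECASE)  # assumed RFC-1123-style label

def is_valid_hostname(hostname):
    if not isinstance(hostname, str):
        raise TypeError('hostname must be a string')
    if hostname and hostname[-1] == '.':
        hostname = hostname[:-1]   # strip exactly one trailing dot
    if not hostname or len(hostname) > 253:
        return False
    labels = hostname.split('.')
    if re.match(NUMERIC_REGEX, labels[-1]):
        return False               # all-numeric TLD rejected
    return all(LABEL_REGEX.match(label) for label in labels)

assert is_valid_hostname('example.com.')          # one trailing dot allowed
assert not is_valid_hostname('example.123')       # numeric TLD
assert not is_valid_hostname('a' * 64 + '.com')   # 64-char label too long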
18,331
|
kyuupichan/aiorpcX
|
aiorpcx/util.py
|
classify_host
|
def classify_host(host):
'''Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
'''
if isinstance(host, (IPv4Address, IPv6Address)):
return host
if is_valid_hostname(host):
return host
return ip_address(host)
|
python
|
def classify_host(host):
'''Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
'''
if isinstance(host, (IPv4Address, IPv6Address)):
return host
if is_valid_hostname(host):
return host
return ip_address(host)
|
[
"def",
"classify_host",
"(",
"host",
")",
":",
"if",
"isinstance",
"(",
"host",
",",
"(",
"IPv4Address",
",",
"IPv6Address",
")",
")",
":",
"return",
"host",
"if",
"is_valid_hostname",
"(",
"host",
")",
":",
"return",
"host",
"return",
"ip_address",
"(",
"host",
")"
] |
Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
|
[
"Host",
"is",
"an",
"IPv4Address",
"IPv6Address",
"or",
"a",
"string",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L63-L77
|
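A usage sketch, with a deliberately simplified stand-in for is_valid_hostname (the full version is defined just above in util.py):

from ipaddress import IPv4Address, IPv6Address, ip_address

def is_valid_hostname(hostname):
    # crude stand-in: non-empty dotted labels, non-numeric TLD
    labels = hostname.rstrip('.').split('.') if isinstance(hostname, str) else []
    return bool(labels) and all(labels) and not labels[-1].isdigit()

def classify_host(host):
    if isinstance(host, (IPv4Address, IPv6Address)):
        return host
    if is_valid_hostname(host):
        return host
    return ip_address(host)

assert classify_host('example.com') == 'example.com'
assert classify_host('192.168.0.1') == IPv4Address('192.168.0.1')
assert classify_host(IPv6Address('::1')) == IPv6Address('::1')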
18,332
|
kyuupichan/aiorpcX
|
aiorpcx/util.py
|
validate_port
|
def validate_port(port):
'''Validate port and return it as an integer.
    An integer, or its representation as a string, is accepted.'''
if not isinstance(port, (str, int)):
raise TypeError(f'port must be an integer or string: {port}')
if isinstance(port, str) and port.isdigit():
port = int(port)
if isinstance(port, int) and 0 < port <= 65535:
return port
raise ValueError(f'invalid port: {port}')
|
python
|
def validate_port(port):
'''Validate port and return it as an integer.
    An integer, or its representation as a string, is accepted.'''
if not isinstance(port, (str, int)):
raise TypeError(f'port must be an integer or string: {port}')
if isinstance(port, str) and port.isdigit():
port = int(port)
if isinstance(port, int) and 0 < port <= 65535:
return port
raise ValueError(f'invalid port: {port}')
|
[
"def",
"validate_port",
"(",
"port",
")",
":",
"if",
"not",
"isinstance",
"(",
"port",
",",
"(",
"str",
",",
"int",
")",
")",
":",
"raise",
"TypeError",
"(",
"f'port must be an integer or string: {port}'",
")",
"if",
"isinstance",
"(",
"port",
",",
"str",
")",
"and",
"port",
".",
"isdigit",
"(",
")",
":",
"port",
"=",
"int",
"(",
"port",
")",
"if",
"isinstance",
"(",
"port",
",",
"int",
")",
"and",
"0",
"<",
"port",
"<=",
"65535",
":",
"return",
"port",
"raise",
"ValueError",
"(",
"f'invalid port: {port}'",
")"
] |
Validate port and return it as an integer.
An integer, or its representation as a string, is accepted.
|
[
"Validate",
"port",
"and",
"return",
"it",
"as",
"an",
"integer",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L80-L90
|
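A quick behaviour sketch of validate_port, copied verbatim so the assertions are runnable:

def validate_port(port):
    if not isinstance(port, (str, int)):
        raise TypeError(f'port must be an integer or string: {port}')
    if isinstance(port, str) and port.isdigit():
        port = int(port)
    if isinstance(port, int) and 0 < port <= 65535:
        return port
    raise ValueError(f'invalid port: {port}')

assert validate_port('8080') == 8080
assert validate_port(443) == 443
for bad in (0, 65536, 'http', '-1'):
    try:
        validate_port(bad)
        raise AssertionError(f'{bad!r} should be rejected')
    except ValueError:
        pass  # each of these is rejected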
18,333
|
kyuupichan/aiorpcX
|
aiorpcx/util.py
|
validate_protocol
|
def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
return protocol.lower()
|
python
|
def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
return protocol.lower()
|
[
"def",
"validate_protocol",
"(",
"protocol",
")",
":",
"if",
"not",
"re",
".",
"match",
"(",
"PROTOCOL_REGEX",
",",
"protocol",
")",
":",
"raise",
"ValueError",
"(",
"f'invalid protocol: {protocol}'",
")",
"return",
"protocol",
".",
"lower",
"(",
")"
] |
Validate a protocol, a string, and return it.
|
[
"Validate",
"a",
"protocol",
"a",
"string",
"and",
"return",
"it",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L93-L97
|
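Usage sketch with an assumed pattern for PROTOCOL_REGEX (the real definition lives in aiorpcx/util.py):

import re

PROTOCOL_REGEX = re.compile('[A-Za-z][A-Za-z0-9+.-]*$')  # assumed: letter, then letters/digits/+/./-

def validate_protocol(protocol):
    if not re.match(PROTOCOL_REGEX, protocol):
        raise ValueError(f'invalid protocol: {protocol}')
    return protocol.lower()

assert validate_protocol('TCP') == 'tcp'
assert validate_protocol('ws+unix') == 'ws+unix'
try:
    validate_protocol('9p')  # may not start with a digit under this pattern
except ValueError:
    pass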
18,334
|
kyuupichan/aiorpcX
|
aiorpcx/util.py
|
is_async_call
|
def is_async_call(func):
'''inspect.iscoroutinefunction that looks through partials.'''
while isinstance(func, partial):
func = func.func
return inspect.iscoroutinefunction(func)
|
python
|
def is_async_call(func):
'''inspect.iscoroutinefunction that looks through partials.'''
while isinstance(func, partial):
func = func.func
return inspect.iscoroutinefunction(func)
|
[
"def",
"is_async_call",
"(",
"func",
")",
":",
"while",
"isinstance",
"(",
"func",
",",
"partial",
")",
":",
"func",
"=",
"func",
".",
"func",
"return",
"inspect",
".",
"iscoroutinefunction",
"(",
"func",
")"
] |
inspect.iscoroutinefunction that looks through partials.
|
[
"inspect",
".",
"iscoroutinefunction",
"that",
"looks",
"through",
"partials",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L261-L265
|
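The loop above unwraps nested functools.partial layers before asking inspect; a runnable sketch:

import inspect
from functools import partial

def is_async_call(func):
    while isinstance(func, partial):
        func = func.func
    return inspect.iscoroutinefunction(func)

async def fetch(url, timeout=None):
    ...

assert is_async_call(fetch)
assert is_async_call(partial(partial(fetch, 'https://example.invalid'), timeout=5))
assert not is_async_call(print)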
18,335
|
kyuupichan/aiorpcX
|
aiorpcx/util.py
|
Service.from_string
|
def from_string(cls, string, *, default_func=None):
'''Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
'''
if not isinstance(string, str):
raise TypeError(f'service must be a string: {string}')
parts = string.split('://', 1)
if len(parts) == 2:
protocol, address = parts
else:
item, = parts
protocol = None
if default_func:
if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):
protocol, address = item, ''
else:
protocol, address = default_func(None, ServicePart.PROTOCOL), item
if not protocol:
raise ValueError(f'invalid service string: {string}')
if default_func:
default_func = partial(default_func, protocol.lower())
address = NetAddress.from_string(address, default_func=default_func)
return cls(protocol, address)
|
python
|
def from_string(cls, string, *, default_func=None):
'''Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
'''
if not isinstance(string, str):
raise TypeError(f'service must be a string: {string}')
parts = string.split('://', 1)
if len(parts) == 2:
protocol, address = parts
else:
item, = parts
protocol = None
if default_func:
if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):
protocol, address = item, ''
else:
protocol, address = default_func(None, ServicePart.PROTOCOL), item
if not protocol:
raise ValueError(f'invalid service string: {string}')
if default_func:
default_func = partial(default_func, protocol.lower())
address = NetAddress.from_string(address, default_func=default_func)
return cls(protocol, address)
|
[
"def",
"from_string",
"(",
"cls",
",",
"string",
",",
"*",
",",
"default_func",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"string",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"f'service must be a string: {string}'",
")",
"parts",
"=",
"string",
".",
"split",
"(",
"'://'",
",",
"1",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"protocol",
",",
"address",
"=",
"parts",
"else",
":",
"item",
",",
"=",
"parts",
"protocol",
"=",
"None",
"if",
"default_func",
":",
"if",
"default_func",
"(",
"item",
",",
"ServicePart",
".",
"HOST",
")",
"and",
"default_func",
"(",
"item",
",",
"ServicePart",
".",
"PORT",
")",
":",
"protocol",
",",
"address",
"=",
"item",
",",
"''",
"else",
":",
"protocol",
",",
"address",
"=",
"default_func",
"(",
"None",
",",
"ServicePart",
".",
"PROTOCOL",
")",
",",
"item",
"if",
"not",
"protocol",
":",
"raise",
"ValueError",
"(",
"f'invalid service string: {string}'",
")",
"if",
"default_func",
":",
"default_func",
"=",
"partial",
"(",
"default_func",
",",
"protocol",
".",
"lower",
"(",
")",
")",
"address",
"=",
"NetAddress",
".",
"from_string",
"(",
"address",
",",
"default_func",
"=",
"default_func",
")",
"return",
"cls",
"(",
"protocol",
",",
"address",
")"
] |
Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
|
[
"Construct",
"a",
"Service",
"from",
"a",
"string",
"."
] |
707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L218-L244
|
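A cut-down sketch of just the '://'-splitting step above, eliding NetAddress parsing and the default_func machinery (split_service and its arguments are hypothetical names):

def split_service(string, default_protocol=None):
    parts = string.split('://', 1)
    if len(parts) == 2:
        return parts[0].lower(), parts[1]
    if default_protocol is None:
        raise ValueError(f'invalid service string: {string}')
    return default_protocol, parts[0]

assert split_service('SSL://example.com:50002') == ('ssl', 'example.com:50002')
assert split_service('example.com:50002', default_protocol='tcp') == ('tcp', 'example.com:50002')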
18,336
|
monarch-initiative/dipper
|
dipper/sources/OMIA.py
|
OMIA.scrub
|
def scrub(self):
"""
The XML file seems to have mixed-encoding;
we scrub out the control characters
from the file for processing.
    i.e.:
omia.xml:1555328.28: PCDATA invalid Char value 2
<field name="journal">Bulletin et Memoires de la Societe Centrale de Medic
:return:
"""
LOG.info("Scrubbing out the nasty characters that break our parser.")
myfile = '/'.join((self.rawdir, self.files['data']['file']))
tmpfile = '/'.join((self.rawdir, self.files['data']['file']+'.tmp.gz'))
tmp = gzip.open(tmpfile, 'wb')
du = DipperUtil()
with gzip.open(myfile, 'rb') as fh:
filereader = io.TextIOWrapper(fh, newline="")
for line in filereader:
line = du.remove_control_characters(line) + '\n'
tmp.write(line.encode('utf-8'))
tmp.close()
# TEC I do not like this at all. original data must be preserved as is.
# also may be heavy handed as chars which do not break the parser
# are stripped as well (i.e. tabs and newlines)
# move the temp file
LOG.info("Replacing the original data with the scrubbed file.")
shutil.move(tmpfile, myfile)
return
|
python
|
def scrub(self):
"""
The XML file seems to have mixed-encoding;
we scrub out the control characters
from the file for processing.
    i.e.:
omia.xml:1555328.28: PCDATA invalid Char value 2
<field name="journal">Bulletin et Memoires de la Societe Centrale de Medic
:return:
"""
LOG.info("Scrubbing out the nasty characters that break our parser.")
myfile = '/'.join((self.rawdir, self.files['data']['file']))
tmpfile = '/'.join((self.rawdir, self.files['data']['file']+'.tmp.gz'))
tmp = gzip.open(tmpfile, 'wb')
du = DipperUtil()
with gzip.open(myfile, 'rb') as fh:
filereader = io.TextIOWrapper(fh, newline="")
for line in filereader:
line = du.remove_control_characters(line) + '\n'
tmp.write(line.encode('utf-8'))
tmp.close()
# TEC I do not like this at all. original data must be preserved as is.
# also may be heavy handed as chars which do not break the parser
# are stripped as well (i.e. tabs and newlines)
# move the temp file
LOG.info("Replacing the original data with the scrubbed file.")
shutil.move(tmpfile, myfile)
return
|
[
"def",
"scrub",
"(",
"self",
")",
":",
"LOG",
".",
"info",
"(",
"\"Scrubbing out the nasty characters that break our parser.\"",
")",
"myfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'data'",
"]",
"[",
"'file'",
"]",
")",
")",
"tmpfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'data'",
"]",
"[",
"'file'",
"]",
"+",
"'.tmp.gz'",
")",
")",
"tmp",
"=",
"gzip",
".",
"open",
"(",
"tmpfile",
",",
"'wb'",
")",
"du",
"=",
"DipperUtil",
"(",
")",
"with",
"gzip",
".",
"open",
"(",
"myfile",
",",
"'rb'",
")",
"as",
"fh",
":",
"filereader",
"=",
"io",
".",
"TextIOWrapper",
"(",
"fh",
",",
"newline",
"=",
"\"\"",
")",
"for",
"line",
"in",
"filereader",
":",
"line",
"=",
"du",
".",
"remove_control_characters",
"(",
"line",
")",
"+",
"'\\n'",
"tmp",
".",
"write",
"(",
"line",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"tmp",
".",
"close",
"(",
")",
"# TEC I do not like this at all. original data must be preserved as is.",
"# also may be heavy handed as chars which do not break the parser",
"# are stripped as well (i.e. tabs and newlines)",
"# move the temp file",
"LOG",
".",
"info",
"(",
"\"Replacing the original data with the scrubbed file.\"",
")",
"shutil",
".",
"move",
"(",
"tmpfile",
",",
"myfile",
")",
"return"
] |
The XML file seems to have mixed-encoding;
we scrub out the control characters
from the file for processing.
i.e.:
omia.xml:1555328.28: PCDATA invalid Char value 2
<field name="journal">Bulletin et Memoires de la Societe Centrale de Medic
:return:
|
[
"The",
"XML",
"file",
"seems",
"to",
"have",
"mixed",
"-",
"encoding",
";",
"we",
"scrub",
"out",
"the",
"control",
"characters",
"from",
"the",
"file",
"for",
"processing",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L202-L234
|
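A plausible stand-in for DipperUtil.remove_control_characters, the per-line helper scrub() relies on; as the TEC comment notes, a category-based filter like this also strips tabs and newlines:

import unicodedata

def remove_control_characters(text):
    # drop Unicode category Cc (control) characters, e.g. the stray \x02
    # that produced the 'PCDATA invalid Char value 2' parser error
    return ''.join(ch for ch in text if unicodedata.category(ch) != 'Cc')

assert remove_control_characters('Bulletin\x02 et Memoires') == 'Bulletin et Memoires'
assert remove_control_characters('a\tb\n') == 'ab'  # tabs and newlines go too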
18,337
|
monarch-initiative/dipper
|
dipper/sources/OMIA.py
|
OMIA.process_associations
|
def process_associations(self, limit):
"""
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
f = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(f, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
self.process_xml_table(
elem, 'Article_Breed', self._process_article_breed_row, limit)
self.process_xml_table(
elem, 'Article_Phene', self._process_article_phene_row, limit)
self.process_xml_table(
elem, 'Breed_Phene', self._process_breed_phene_row, limit)
self.process_xml_table(
elem, 'Lida_Links', self._process_lida_links_row, limit)
self.process_xml_table(
elem, 'Phene_Gene', self._process_phene_gene_row, limit)
self.process_xml_table(
elem, 'Group_MPO', self._process_group_mpo_row, limit)
f.close()
return
|
python
|
def process_associations(self, limit):
"""
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
f = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(f, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
self.process_xml_table(
elem, 'Article_Breed', self._process_article_breed_row, limit)
self.process_xml_table(
elem, 'Article_Phene', self._process_article_phene_row, limit)
self.process_xml_table(
elem, 'Breed_Phene', self._process_breed_phene_row, limit)
self.process_xml_table(
elem, 'Lida_Links', self._process_lida_links_row, limit)
self.process_xml_table(
elem, 'Phene_Gene', self._process_phene_gene_row, limit)
self.process_xml_table(
elem, 'Group_MPO', self._process_group_mpo_row, limit)
f.close()
return
|
[
"def",
"process_associations",
"(",
"self",
",",
"limit",
")",
":",
"myfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'data'",
"]",
"[",
"'file'",
"]",
")",
")",
"f",
"=",
"gzip",
".",
"open",
"(",
"myfile",
",",
"'rb'",
")",
"filereader",
"=",
"io",
".",
"TextIOWrapper",
"(",
"f",
",",
"newline",
"=",
"\"\"",
")",
"filereader",
".",
"readline",
"(",
")",
"# remove the xml declaration line",
"for",
"event",
",",
"elem",
"in",
"ET",
".",
"iterparse",
"(",
"filereader",
")",
":",
"# iterparse is not deprecated",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Article_Breed'",
",",
"self",
".",
"_process_article_breed_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Article_Phene'",
",",
"self",
".",
"_process_article_phene_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Breed_Phene'",
",",
"self",
".",
"_process_breed_phene_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Lida_Links'",
",",
"self",
".",
"_process_lida_links_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Phene_Gene'",
",",
"self",
".",
"_process_phene_gene_row",
",",
"limit",
")",
"self",
".",
"process_xml_table",
"(",
"elem",
",",
"'Group_MPO'",
",",
"self",
".",
"_process_group_mpo_row",
",",
"limit",
")",
"f",
".",
"close",
"(",
")",
"return"
] |
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return:
|
[
"Loop",
"through",
"the",
"xml",
"file",
"and",
"process",
"the",
"article",
"-",
"breed",
"article",
"-",
"phene",
"breed",
"-",
"phene",
"phene",
"-",
"gene",
"associations",
"and",
"the",
"external",
"links",
"to",
"LIDA",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L362-L390
|
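The iterparse loop above streams the dump rather than loading it whole; a toy version of the pattern against an invented two-row table (the real OMIA schema and process_xml_table differ):

import io
import xml.etree.ElementTree as ET

xml = io.StringIO('<dump><table name="Article_Breed"><row/><row/></table></dump>')
rows = 0
for _event, elem in ET.iterparse(xml):
    if elem.tag == 'table' and elem.get('name') == 'Article_Breed':
        rows += len(elem.findall('row'))
        elem.clear()  # release children once a table is processed
assert rows == 2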
18,338
|
monarch-initiative/dipper
|
dipper/sources/OMIA.py
|
OMIA._process_article_phene_row
|
def _process_article_phene_row(self, row):
"""
Linking articles to species-specific phenes.
:param row:
:return:
"""
# article_id, phene_id, added_by
# look up the article in the hashmap
phenotype_id = self.id_hash['phene'].get(row['phene_id'])
article_id = self.id_hash['article'].get(row['article_id'])
omia_id = self._get_omia_id_from_phene_id(phenotype_id)
if self.test_mode or omia_id not in self.test_ids['disease'] \
or phenotype_id is None or article_id is None:
return
# make a triple, where the article is about the phenotype
self.graph.addTriple(
article_id,
self.globaltt['is_about'], phenotype_id)
return
|
python
|
def _process_article_phene_row(self, row):
"""
Linking articles to species-specific phenes.
:param row:
:return:
"""
# article_id, phene_id, added_by
# look up the article in the hashmap
phenotype_id = self.id_hash['phene'].get(row['phene_id'])
article_id = self.id_hash['article'].get(row['article_id'])
omia_id = self._get_omia_id_from_phene_id(phenotype_id)
if self.test_mode or omia_id not in self.test_ids['disease'] \
or phenotype_id is None or article_id is None:
return
# make a triple, where the article is about the phenotype
self.graph.addTriple(
article_id,
self.globaltt['is_about'], phenotype_id)
return
|
[
"def",
"_process_article_phene_row",
"(",
"self",
",",
"row",
")",
":",
"# article_id, phene_id, added_by",
"# look up the article in the hashmap",
"phenotype_id",
"=",
"self",
".",
"id_hash",
"[",
"'phene'",
"]",
".",
"get",
"(",
"row",
"[",
"'phene_id'",
"]",
")",
"article_id",
"=",
"self",
".",
"id_hash",
"[",
"'article'",
"]",
".",
"get",
"(",
"row",
"[",
"'article_id'",
"]",
")",
"omia_id",
"=",
"self",
".",
"_get_omia_id_from_phene_id",
"(",
"phenotype_id",
")",
"if",
"self",
".",
"test_mode",
"or",
"omia_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'disease'",
"]",
"or",
"phenotype_id",
"is",
"None",
"or",
"article_id",
"is",
"None",
":",
"return",
"# make a triple, where the article is about the phenotype",
"self",
".",
"graph",
".",
"addTriple",
"(",
"article_id",
",",
"self",
".",
"globaltt",
"[",
"'is_about'",
"]",
",",
"phenotype_id",
")",
"return"
] |
Linking articles to species-specific phenes.
:param row:
:return:
|
[
"Linking",
"articles",
"to",
"species",
"-",
"specific",
"phenes",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L645-L667
|
18,339
|
monarch-initiative/dipper
|
dipper/sources/OMIA.py
|
OMIA.filter_keep_phenotype_entry_ids
|
def filter_keep_phenotype_entry_ids(self, entry):
'''
doubt this should be kept
'''
omim_id = str(entry['mimNumber'])
otype = self.globaltt['obsolete']
if omim_id in self.omim_type:
otype = self.omim_type[omim_id]
if otype == self.globaltt['obsolete'] and omim_id in self.omim_replaced:
omim_id = self.omim_replaced[omim_id]
otype = self.omim_type[omim_id]
# else: # removed or multiple
if otype not in (
self.globaltt['Phenotype'], self.globaltt['has_affected_feature']):
omim_id = None
return omim_id
|
python
|
def filter_keep_phenotype_entry_ids(self, entry):
'''
doubt this should be kept
'''
omim_id = str(entry['mimNumber'])
otype = self.globaltt['obsolete']
if omim_id in self.omim_type:
otype = self.omim_type[omim_id]
if otype == self.globaltt['obsolete'] and omim_id in self.omim_replaced:
omim_id = self.omim_replaced[omim_id]
otype = self.omim_type[omim_id]
# else: # removed or multiple
if otype not in (
self.globaltt['Phenotype'], self.globaltt['has_affected_feature']):
omim_id = None
return omim_id
|
[
"def",
"filter_keep_phenotype_entry_ids",
"(",
"self",
",",
"entry",
")",
":",
"omim_id",
"=",
"str",
"(",
"entry",
"[",
"'mimNumber'",
"]",
")",
"otype",
"=",
"self",
".",
"globaltt",
"[",
"'obsolete'",
"]",
"if",
"omim_id",
"in",
"self",
".",
"omim_type",
":",
"otype",
"=",
"self",
".",
"omim_type",
"[",
"omim_id",
"]",
"if",
"otype",
"==",
"self",
".",
"globaltt",
"[",
"'obsolete'",
"]",
"and",
"omim_id",
"in",
"self",
".",
"omim_replaced",
":",
"omim_id",
"=",
"self",
".",
"omim_replaced",
"[",
"omim_id",
"]",
"otype",
"=",
"self",
".",
"omim_type",
"[",
"omim_id",
"]",
"# else: # removed or multiple",
"if",
"otype",
"not",
"in",
"(",
"self",
".",
"globaltt",
"[",
"'Phenotype'",
"]",
",",
"self",
".",
"globaltt",
"[",
"'has_affected_feature'",
"]",
")",
":",
"omim_id",
"=",
"None",
"return",
"omim_id"
] |
doubt this should be kept
|
[
"doubt",
"this",
"should",
"be",
"kept"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L825-L841
|
18,340
|
monarch-initiative/dipper
|
dipper/sources/ClinVarXML_alpha.py
|
make_spo
|
def make_spo(sub, prd, obj):
'''
Decorates the three given strings as a line of ntriples
'''
# To establish string as a curie and expand,
# we use a global curie_map(.yaml)
    # sub are always uri (unless a bnode)
    # prd are always uri (unless prd is 'a')
# should fail loudly if curie does not exist
if prd == 'a':
prd = 'rdf:type'
try:
(subcuri, subid) = re.split(r':', sub)
except Exception:
LOG.error("not a Subject Curie '%s'", sub)
raise ValueError
try:
(prdcuri, prdid) = re.split(r':', prd)
except Exception:
LOG.error("not a Predicate Curie '%s'", prd)
raise ValueError
objt = ''
# object is a curie or bnode or literal [string|number]
objcuri = None
match = re.match(CURIERE, obj)
if match is not None:
try:
(objcuri, objid) = re.split(r':', obj)
except ValueError:
match = None
if match is not None and objcuri in CURIEMAP:
objt = CURIEMAP[objcuri] + objid.strip()
# allow unexpanded bnodes in object
if objcuri != '_' or CURIEMAP[objcuri] != '_:b':
objt = '<' + objt + '>'
elif obj.isnumeric():
objt = '"' + obj + '"'
else:
# Literals may not contain the characters ", LF, CR '\'
# except in their escaped forms. internal quotes as well.
obj = obj.strip('"').replace('\\', '\\\\').replace('"', '\'')
obj = obj.replace('\n', '\\n').replace('\r', '\\r')
objt = '"' + obj + '"'
# allow unexpanded bnodes in subject
if subcuri is not None and subcuri in CURIEMAP and \
prdcuri is not None and prdcuri in CURIEMAP:
subjt = CURIEMAP[subcuri] + subid.strip()
if subcuri != '_' or CURIEMAP[subcuri] != '_:b':
subjt = '<' + subjt + '>'
return subjt + ' <' + CURIEMAP[prdcuri] + prdid.strip() + '> ' + objt + ' .'
else:
LOG.error(
'Cant work with: <%s> %s , <%s> %s, %s',
subcuri, subid, prdcuri, prdid, objt)
return None
|
python
|
def make_spo(sub, prd, obj):
'''
Decorates the three given strings as a line of ntriples
'''
# To establish string as a curie and expand,
# we use a global curie_map(.yaml)
    # sub are always uri (unless a bnode)
    # prd are always uri (unless prd is 'a')
# should fail loudly if curie does not exist
if prd == 'a':
prd = 'rdf:type'
try:
(subcuri, subid) = re.split(r':', sub)
except Exception:
LOG.error("not a Subject Curie '%s'", sub)
raise ValueError
try:
(prdcuri, prdid) = re.split(r':', prd)
except Exception:
LOG.error("not a Predicate Curie '%s'", prd)
raise ValueError
objt = ''
# object is a curie or bnode or literal [string|number]
objcuri = None
match = re.match(CURIERE, obj)
if match is not None:
try:
(objcuri, objid) = re.split(r':', obj)
except ValueError:
match = None
if match is not None and objcuri in CURIEMAP:
objt = CURIEMAP[objcuri] + objid.strip()
# allow unexpanded bnodes in object
if objcuri != '_' or CURIEMAP[objcuri] != '_:b':
objt = '<' + objt + '>'
elif obj.isnumeric():
objt = '"' + obj + '"'
else:
# Literals may not contain the characters ", LF, CR '\'
# except in their escaped forms. internal quotes as well.
obj = obj.strip('"').replace('\\', '\\\\').replace('"', '\'')
obj = obj.replace('\n', '\\n').replace('\r', '\\r')
objt = '"' + obj + '"'
# allow unexpanded bnodes in subject
if subcuri is not None and subcuri in CURIEMAP and \
prdcuri is not None and prdcuri in CURIEMAP:
subjt = CURIEMAP[subcuri] + subid.strip()
if subcuri != '_' or CURIEMAP[subcuri] != '_:b':
subjt = '<' + subjt + '>'
return subjt + ' <' + CURIEMAP[prdcuri] + prdid.strip() + '> ' + objt + ' .'
else:
LOG.error(
'Cant work with: <%s> %s , <%s> %s, %s',
subcuri, subid, prdcuri, prdid, objt)
return None
|
[
"def",
"make_spo",
"(",
"sub",
",",
"prd",
",",
"obj",
")",
":",
"# To establish string as a curie and expand,",
"# we use a global curie_map(.yaml)",
"# sub are allways uri (unless a bnode)",
"# prd are allways uri (unless prd is 'a')",
"# should fail loudly if curie does not exist",
"if",
"prd",
"==",
"'a'",
":",
"prd",
"=",
"'rdf:type'",
"try",
":",
"(",
"subcuri",
",",
"subid",
")",
"=",
"re",
".",
"split",
"(",
"r':'",
",",
"sub",
")",
"except",
"Exception",
":",
"LOG",
".",
"error",
"(",
"\"not a Subject Curie '%s'\"",
",",
"sub",
")",
"raise",
"ValueError",
"try",
":",
"(",
"prdcuri",
",",
"prdid",
")",
"=",
"re",
".",
"split",
"(",
"r':'",
",",
"prd",
")",
"except",
"Exception",
":",
"LOG",
".",
"error",
"(",
"\"not a Predicate Curie '%s'\"",
",",
"prd",
")",
"raise",
"ValueError",
"objt",
"=",
"''",
"# object is a curie or bnode or literal [string|number]",
"objcuri",
"=",
"None",
"match",
"=",
"re",
".",
"match",
"(",
"CURIERE",
",",
"obj",
")",
"if",
"match",
"is",
"not",
"None",
":",
"try",
":",
"(",
"objcuri",
",",
"objid",
")",
"=",
"re",
".",
"split",
"(",
"r':'",
",",
"obj",
")",
"except",
"ValueError",
":",
"match",
"=",
"None",
"if",
"match",
"is",
"not",
"None",
"and",
"objcuri",
"in",
"CURIEMAP",
":",
"objt",
"=",
"CURIEMAP",
"[",
"objcuri",
"]",
"+",
"objid",
".",
"strip",
"(",
")",
"# allow unexpanded bnodes in object",
"if",
"objcuri",
"!=",
"'_'",
"or",
"CURIEMAP",
"[",
"objcuri",
"]",
"!=",
"'_:b'",
":",
"objt",
"=",
"'<'",
"+",
"objt",
"+",
"'>'",
"elif",
"obj",
".",
"isnumeric",
"(",
")",
":",
"objt",
"=",
"'\"'",
"+",
"obj",
"+",
"'\"'",
"else",
":",
"# Literals may not contain the characters \", LF, CR '\\'",
"# except in their escaped forms. internal quotes as well.",
"obj",
"=",
"obj",
".",
"strip",
"(",
"'\"'",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\''",
")",
"obj",
"=",
"obj",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\n'",
")",
".",
"replace",
"(",
"'\\r'",
",",
"'\\\\r'",
")",
"objt",
"=",
"'\"'",
"+",
"obj",
"+",
"'\"'",
"# allow unexpanded bnodes in subject",
"if",
"subcuri",
"is",
"not",
"None",
"and",
"subcuri",
"in",
"CURIEMAP",
"and",
"prdcuri",
"is",
"not",
"None",
"and",
"prdcuri",
"in",
"CURIEMAP",
":",
"subjt",
"=",
"CURIEMAP",
"[",
"subcuri",
"]",
"+",
"subid",
".",
"strip",
"(",
")",
"if",
"subcuri",
"!=",
"'_'",
"or",
"CURIEMAP",
"[",
"subcuri",
"]",
"!=",
"'_:b'",
":",
"subjt",
"=",
"'<'",
"+",
"subjt",
"+",
"'>'",
"return",
"subjt",
"+",
"' <'",
"+",
"CURIEMAP",
"[",
"prdcuri",
"]",
"+",
"prdid",
".",
"strip",
"(",
")",
"+",
"'> '",
"+",
"objt",
"+",
"' .'",
"else",
":",
"LOG",
".",
"error",
"(",
"'Cant work with: <%s> %s , <%s> %s, %s'",
",",
"subcuri",
",",
"subid",
",",
"prdcuri",
",",
"prdid",
",",
"objt",
")",
"return",
"None"
] |
Decorates the three given strings as a line of ntriples
|
[
"Decorates",
"the",
"three",
"given",
"strings",
"as",
"a",
"line",
"of",
"ntriples"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ClinVarXML_alpha.py#L183-L244
|
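A cut-down sketch of the literal-escaping branch above, which rewrites internal double quotes as single quotes and escapes backslashes, LF and CR:

def escape_literal(obj):
    obj = obj.strip('"').replace('\\', '\\\\').replace('"', "'")
    obj = obj.replace('\n', '\\n').replace('\r', '\\r')
    return '"' + obj + '"'

assert escape_literal('say "hi"\n') == '"say \'hi\'\\n"'
assert escape_literal('C:\\path') == '"C:\\\\path"'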
18,341
|
monarch-initiative/dipper
|
dipper/sources/ClinVarXML_alpha.py
|
write_spo
|
def write_spo(sub, prd, obj):
'''
    write triples to a buffer in case we decide to drop them
'''
rcvtriples.append(make_spo(sub, prd, obj))
|
python
|
def write_spo(sub, prd, obj):
'''
    write triples to a buffer in case we decide to drop them
'''
rcvtriples.append(make_spo(sub, prd, obj))
|
[
"def",
"write_spo",
"(",
"sub",
",",
"prd",
",",
"obj",
")",
":",
"rcvtriples",
".",
"append",
"(",
"make_spo",
"(",
"sub",
",",
"prd",
",",
"obj",
")",
")"
] |
write triples to a buffer in case we decide to drop them
|
[
"write",
"triples",
"to",
"a",
"buffer",
"incase",
"we",
"decide",
"to",
"drop",
"them"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ClinVarXML_alpha.py#L247-L251
|
18,342
|
monarch-initiative/dipper
|
dipper/sources/Decipher.py
|
Decipher.make_allele_by_consequence
|
def make_allele_by_consequence(self, consequence, gene_id, gene_symbol):
"""
Given a "consequence" label that describes a variation type,
create an anonymous variant of the specified gene as an instance of
that consequence type.
:param consequence:
:param gene_id:
:param gene_symbol:
:return: allele_id
"""
allele_id = None
# Loss of function : Nonsense, frame-shifting indel,
# essential splice site mutation, whole gene deletion or any other
# mutation where functional analysis demonstrates clear reduction
# or loss of function
# All missense/in frame : Where all the mutations described in the data
# source are either missense or in frame deletions and there is no
# evidence favoring either loss-of-function, activating or
# dominant negative effect
# Dominant negative : Mutation within one allele of a gene that creates
# a significantly greater deleterious effect on gene product
# function than a monoallelic loss of function mutation
# Activating : Mutation, usually missense that results in
# a constitutive functional activation of the gene product
# Increased gene dosage : Copy number variation that increases
# the functional dosage of the gene
# Cis-regulatory or promotor mutation : Mutation in cis-regulatory
# elements that lies outwith the known transcription unit and
# promotor of the controlled gene
# Uncertain : Where the exact nature of the mutation is unclear or
# not recorded
type_id = self.resolve(consequence, mandatory=False)
if type_id == consequence:
LOG.warning("Consequence type unmapped: %s", str(consequence))
type_id = self.globaltt['sequence_variant']
# make the allele
allele_id = ''.join((gene_id, type_id))
allele_id = re.sub(r':', '', allele_id)
allele_id = '_:'+allele_id # make this a BNode
allele_label = ' '.join((consequence, 'allele in', gene_symbol))
self.model.addIndividualToGraph(allele_id, allele_label, type_id)
self.geno.addAlleleOfGene(allele_id, gene_id)
return allele_id
|
python
|
def make_allele_by_consequence(self, consequence, gene_id, gene_symbol):
"""
Given a "consequence" label that describes a variation type,
create an anonymous variant of the specified gene as an instance of
that consequence type.
:param consequence:
:param gene_id:
:param gene_symbol:
:return: allele_id
"""
allele_id = None
# Loss of function : Nonsense, frame-shifting indel,
# essential splice site mutation, whole gene deletion or any other
# mutation where functional analysis demonstrates clear reduction
# or loss of function
# All missense/in frame : Where all the mutations described in the data
# source are either missense or in frame deletions and there is no
# evidence favoring either loss-of-function, activating or
# dominant negative effect
# Dominant negative : Mutation within one allele of a gene that creates
# a significantly greater deleterious effect on gene product
# function than a monoallelic loss of function mutation
# Activating : Mutation, usually missense that results in
# a constitutive functional activation of the gene product
# Increased gene dosage : Copy number variation that increases
# the functional dosage of the gene
# Cis-regulatory or promotor mutation : Mutation in cis-regulatory
# elements that lies outwith the known transcription unit and
# promotor of the controlled gene
# Uncertain : Where the exact nature of the mutation is unclear or
# not recorded
type_id = self.resolve(consequence, mandatory=False)
if type_id == consequence:
LOG.warning("Consequence type unmapped: %s", str(consequence))
type_id = self.globaltt['sequence_variant']
# make the allele
allele_id = ''.join((gene_id, type_id))
allele_id = re.sub(r':', '', allele_id)
allele_id = '_:'+allele_id # make this a BNode
allele_label = ' '.join((consequence, 'allele in', gene_symbol))
self.model.addIndividualToGraph(allele_id, allele_label, type_id)
self.geno.addAlleleOfGene(allele_id, gene_id)
return allele_id
|
[
"def",
"make_allele_by_consequence",
"(",
"self",
",",
"consequence",
",",
"gene_id",
",",
"gene_symbol",
")",
":",
"allele_id",
"=",
"None",
"# Loss of function : Nonsense, frame-shifting indel,",
"# essential splice site mutation, whole gene deletion or any other",
"# mutation where functional analysis demonstrates clear reduction",
"# or loss of function",
"# All missense/in frame : Where all the mutations described in the data",
"# source are either missense or in frame deletions and there is no",
"# evidence favoring either loss-of-function, activating or",
"# dominant negative effect",
"# Dominant negative : Mutation within one allele of a gene that creates",
"# a significantly greater deleterious effect on gene product",
"# function than a monoallelic loss of function mutation",
"# Activating : Mutation, usually missense that results in",
"# a constitutive functional activation of the gene product",
"# Increased gene dosage : Copy number variation that increases",
"# the functional dosage of the gene",
"# Cis-regulatory or promotor mutation : Mutation in cis-regulatory",
"# elements that lies outwith the known transcription unit and",
"# promotor of the controlled gene",
"# Uncertain : Where the exact nature of the mutation is unclear or",
"# not recorded",
"type_id",
"=",
"self",
".",
"resolve",
"(",
"consequence",
",",
"mandatory",
"=",
"False",
")",
"if",
"type_id",
"==",
"consequence",
":",
"LOG",
".",
"warning",
"(",
"\"Consequence type unmapped: %s\"",
",",
"str",
"(",
"consequence",
")",
")",
"type_id",
"=",
"self",
".",
"globaltt",
"[",
"'sequence_variant'",
"]",
"# make the allele",
"allele_id",
"=",
"''",
".",
"join",
"(",
"(",
"gene_id",
",",
"type_id",
")",
")",
"allele_id",
"=",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"allele_id",
")",
"allele_id",
"=",
"'_:'",
"+",
"allele_id",
"# make this a BNode",
"allele_label",
"=",
"' '",
".",
"join",
"(",
"(",
"consequence",
",",
"'allele in'",
",",
"gene_symbol",
")",
")",
"self",
".",
"model",
".",
"addIndividualToGraph",
"(",
"allele_id",
",",
"allele_label",
",",
"type_id",
")",
"self",
".",
"geno",
".",
"addAlleleOfGene",
"(",
"allele_id",
",",
"gene_id",
")",
"return",
"allele_id"
] |
Given a "consequence" label that describes a variation type,
create an anonymous variant of the specified gene as an instance of
that consequence type.
:param consequence:
:param gene_id:
:param gene_symbol:
:return: allele_id
|
[
"Given",
"a",
"consequence",
"label",
"that",
"describes",
"a",
"variation",
"type",
"create",
"an",
"anonymous",
"variant",
"of",
"the",
"specified",
"gene",
"as",
"an",
"instance",
"of",
"that",
"consequence",
"type",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Decipher.py#L228-L277
|
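The allele identifier above is built by concatenating the gene and type curies, dropping colons, and prefixing '_:' to mark a BNode; with hypothetical inputs:

import re

gene_id, type_id = 'HGNC:1100', 'SO:0002054'  # hypothetical curies
allele_id = '_:' + re.sub(r':', '', gene_id + type_id)
assert allele_id == '_:HGNC1100SO0002054'
allele_label = ' '.join(('missense_variant', 'allele in', 'BRCA2'))
assert allele_label == 'missense_variant allele in BRCA2'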
18,343
|
monarch-initiative/dipper
|
dipper/sources/EBIGene2Phen.py
|
EBIGene2Phen.parse
|
def parse(self, limit: Optional[int]=None):
"""
Here we parse each row of the gene to phenotype file
We create anonymous variants along with their attributes
(allelic requirement, functional consequence)
and connect these to genes and diseases
genes are connected to variants via
global_terms['has_affected_locus']
variants are connected to attributes via:
global_terms['has_allelic_requirement']
global_terms['has_functional_consequence']
variants are connected to disease based on
mappings to the DDD category column,
see the translationtable specific to this source
for mappings
        For cases where there is no disease OMIM id,
        we use a disease cache file with mappings
        to MONDO that have been manually curated
:param limit: {int} number of rows to parse
:return: None
"""
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
file_path = '/'.join((
self.rawdir, self.files['developmental_disorders']['file']))
with gzip.open(file_path, 'rt') as csvfile:
reader = csv.reader(csvfile)
next(reader) # header
for row in reader:
if limit is None or reader.line_num <= (limit + 1):
self._add_gene_disease(row)
else:
break
LOG.info("Done parsing.")
|
python
|
def parse(self, limit: Optional[int]=None):
"""
Here we parse each row of the gene to phenotype file
We create anonymous variants along with their attributes
(allelic requirement, functional consequence)
and connect these to genes and diseases
genes are connected to variants via
global_terms['has_affected_locus']
variants are connected to attributes via:
global_terms['has_allelic_requirement']
global_terms['has_functional_consequence']
variants are connected to disease based on
mappings to the DDD category column,
see the translationtable specific to this source
for mappings
        For cases where there is no disease OMIM id,
        we use a disease cache file with mappings
        to MONDO that have been manually curated
:param limit: {int} number of rows to parse
:return: None
"""
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
file_path = '/'.join((
self.rawdir, self.files['developmental_disorders']['file']))
with gzip.open(file_path, 'rt') as csvfile:
reader = csv.reader(csvfile)
next(reader) # header
for row in reader:
if limit is None or reader.line_num <= (limit + 1):
self._add_gene_disease(row)
else:
break
LOG.info("Done parsing.")
|
[
"def",
"parse",
"(",
"self",
",",
"limit",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
":",
"LOG",
".",
"info",
"(",
"\"Only parsing first %d rows\"",
",",
"limit",
")",
"LOG",
".",
"info",
"(",
"\"Parsing files...\"",
")",
"file_path",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'developmental_disorders'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"gzip",
".",
"open",
"(",
"file_path",
",",
"'rt'",
")",
"as",
"csvfile",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
")",
"next",
"(",
"reader",
")",
"# header",
"for",
"row",
"in",
"reader",
":",
"if",
"limit",
"is",
"None",
"or",
"reader",
".",
"line_num",
"<=",
"(",
"limit",
"+",
"1",
")",
":",
"self",
".",
"_add_gene_disease",
"(",
"row",
")",
"else",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done parsing.\"",
")"
] |
Here we parse each row of the gene to phenotype file
We create anonymous variants along with their attributes
(allelic requirement, functional consequence)
and connect these to genes and diseases
genes are connected to variants via
global_terms['has_affected_locus']
variants are connected to attributes via:
global_terms['has_allelic_requirement']
global_terms['has_functional_consequence']
variants are connected to disease based on
mappings to the DDD category column,
see the translationtable specific to this source
for mappings
For cases where there is no disease OMIM id,
we use a disease cache file with mappings
to MONDO that have been manually curated
:param limit: {int} number of rows to parse
:return: None
|
[
"Here",
"we",
"parse",
"each",
"row",
"of",
"the",
"gene",
"to",
"phenotype",
"file"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L104-L147
|
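The limit test above leans on csv.reader.line_num, which is 1-based and counts the header row, hence the limit + 1; a sketch with an in-memory file:

import csv
import io

limit = 2
rows_parsed = 0
reader = csv.reader(io.StringIO('gene,disease\na,1\nb,2\nc,3\n'))
next(reader)  # header (line_num is now 1)
for row in reader:
    if limit is None or reader.line_num <= (limit + 1):
        rows_parsed += 1
    else:
        break
assert rows_parsed == 2  # rows beyond the first `limit` are skipped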
18,344
|
monarch-initiative/dipper
|
dipper/sources/EBIGene2Phen.py
|
EBIGene2Phen._add_gene_disease
|
def _add_gene_disease(self, row): # ::List getting syntax error here
"""
Parse and add gene variant disease model
Model building happens in _build_gene_disease_model
:param row {List}: single row from DDG2P.csv
:return: None
"""
col = self.files['developmental_disorders']['columns']
if len(row) != len(col):
raise ValueError("Unexpected number of fields for row {}".format(row))
variant_label = "variant of {}".format(row[col.index('gene_symbol')])
disease_omim_id = row[col.index('disease_omim_id')]
if disease_omim_id == 'No disease mim':
# check if we've manually curated
disease_label = row[col.index('disease_label')]
if disease_label in self.mondo_map:
disease_id = self.mondo_map[disease_label]
else:
return # sorry for this
else:
disease_id = 'OMIM:' + disease_omim_id
hgnc_curie = 'HGNC:' + row[col.index('hgnc_id')]
relation_curie = self.resolve(row[col.index('g2p_relation_label')])
mutation_consequence = row[col.index('mutation_consequence')]
if mutation_consequence not in ('uncertain', ''):
consequence_relation = self.resolve(
self._get_consequence_predicate(mutation_consequence))
consequence_curie = self.resolve(mutation_consequence)
variant_label = "{} {}".format(mutation_consequence, variant_label)
else:
consequence_relation = None
consequence_curie = None
allelic_requirement = row[col.index('allelic_requirement')]
if allelic_requirement != '':
requirement_curie = self.resolve(allelic_requirement)
else:
requirement_curie = None
pmids = row[col.index('pmids')]
if pmids != '':
pmid_list = ['PMID:' + pmid for pmid in pmids.split(';')]
else:
pmid_list = []
# build the model
# Should we build a reusable object and/or tuple that
# could be passed to a more general model builder for
# this and orphanet (and maybe clinvar)
self._build_gene_disease_model(
hgnc_curie,
relation_curie,
disease_id,
variant_label,
consequence_relation,
consequence_curie,
requirement_curie,
pmid_list
)
|
python
|
def _add_gene_disease(self, row): # ::List getting syntax error here
"""
Parse and add gene variant disease model
Model building happens in _build_gene_disease_model
:param row {List}: single row from DDG2P.csv
:return: None
"""
col = self.files['developmental_disorders']['columns']
if len(row) != len(col):
raise ValueError("Unexpected number of fields for row {}".format(row))
variant_label = "variant of {}".format(row[col.index('gene_symbol')])
disease_omim_id = row[col.index('disease_omim_id')]
if disease_omim_id == 'No disease mim':
# check if we've manually curated
disease_label = row[col.index('disease_label')]
if disease_label in self.mondo_map:
disease_id = self.mondo_map[disease_label]
else:
return # sorry for this
else:
disease_id = 'OMIM:' + disease_omim_id
hgnc_curie = 'HGNC:' + row[col.index('hgnc_id')]
relation_curie = self.resolve(row[col.index('g2p_relation_label')])
mutation_consequence = row[col.index('mutation_consequence')]
if mutation_consequence not in ('uncertain', ''):
consequence_relation = self.resolve(
self._get_consequence_predicate(mutation_consequence))
consequence_curie = self.resolve(mutation_consequence)
variant_label = "{} {}".format(mutation_consequence, variant_label)
else:
consequence_relation = None
consequence_curie = None
allelic_requirement = row[col.index('allelic_requirement')]
if allelic_requirement != '':
requirement_curie = self.resolve(allelic_requirement)
else:
requirement_curie = None
pmids = row[col.index('pmids')]
if pmids != '':
pmid_list = ['PMID:' + pmid for pmid in pmids.split(';')]
else:
pmid_list = []
# build the model
# Should we build a reusable object and/or tuple that
# could be passed to a more general model builder for
# this and orphanet (and maybe clinvar)
self._build_gene_disease_model(
hgnc_curie,
relation_curie,
disease_id,
variant_label,
consequence_relation,
consequence_curie,
requirement_curie,
pmid_list
)
|
[
"def",
"_add_gene_disease",
"(",
"self",
",",
"row",
")",
":",
"# ::List getting syntax error here",
"col",
"=",
"self",
".",
"files",
"[",
"'developmental_disorders'",
"]",
"[",
"'columns'",
"]",
"if",
"len",
"(",
"row",
")",
"!=",
"len",
"(",
"col",
")",
":",
"raise",
"ValueError",
"(",
"\"Unexpected number of fields for row {}\"",
".",
"format",
"(",
"row",
")",
")",
"variant_label",
"=",
"\"variant of {}\"",
".",
"format",
"(",
"row",
"[",
"col",
".",
"index",
"(",
"'gene_symbol'",
")",
"]",
")",
"disease_omim_id",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'disease_omim_id'",
")",
"]",
"if",
"disease_omim_id",
"==",
"'No disease mim'",
":",
"# check if we've manually curated",
"disease_label",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'disease_label'",
")",
"]",
"if",
"disease_label",
"in",
"self",
".",
"mondo_map",
":",
"disease_id",
"=",
"self",
".",
"mondo_map",
"[",
"disease_label",
"]",
"else",
":",
"return",
"# sorry for this",
"else",
":",
"disease_id",
"=",
"'OMIM:'",
"+",
"disease_omim_id",
"hgnc_curie",
"=",
"'HGNC:'",
"+",
"row",
"[",
"col",
".",
"index",
"(",
"'hgnc_id'",
")",
"]",
"relation_curie",
"=",
"self",
".",
"resolve",
"(",
"row",
"[",
"col",
".",
"index",
"(",
"'g2p_relation_label'",
")",
"]",
")",
"mutation_consequence",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'mutation_consequence'",
")",
"]",
"if",
"mutation_consequence",
"not",
"in",
"(",
"'uncertain'",
",",
"''",
")",
":",
"consequence_relation",
"=",
"self",
".",
"resolve",
"(",
"self",
".",
"_get_consequence_predicate",
"(",
"mutation_consequence",
")",
")",
"consequence_curie",
"=",
"self",
".",
"resolve",
"(",
"mutation_consequence",
")",
"variant_label",
"=",
"\"{} {}\"",
".",
"format",
"(",
"mutation_consequence",
",",
"variant_label",
")",
"else",
":",
"consequence_relation",
"=",
"None",
"consequence_curie",
"=",
"None",
"allelic_requirement",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'allelic_requirement'",
")",
"]",
"if",
"allelic_requirement",
"!=",
"''",
":",
"requirement_curie",
"=",
"self",
".",
"resolve",
"(",
"allelic_requirement",
")",
"else",
":",
"requirement_curie",
"=",
"None",
"pmids",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'pmids'",
")",
"]",
"if",
"pmids",
"!=",
"''",
":",
"pmid_list",
"=",
"[",
"'PMID:'",
"+",
"pmid",
"for",
"pmid",
"in",
"pmids",
".",
"split",
"(",
"';'",
")",
"]",
"else",
":",
"pmid_list",
"=",
"[",
"]",
"# build the model",
"# Should we build a reusable object and/or tuple that",
"# could be passed to a more general model builder for",
"# this and orphanet (and maybe clinvar)",
"self",
".",
"_build_gene_disease_model",
"(",
"hgnc_curie",
",",
"relation_curie",
",",
"disease_id",
",",
"variant_label",
",",
"consequence_relation",
",",
"consequence_curie",
",",
"requirement_curie",
",",
"pmid_list",
")"
] |
Parse and add gene variant disease model
Model building happens in _build_gene_disease_model
:param row {List}: single row from DDG2P.csv
:return: None
|
[
"Parse",
"and",
"add",
"gene",
"variant",
"disease",
"model",
"Model",
"building",
"happens",
"in",
"_build_gene_disease_model"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L149-L211
|
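One of the per-row conversions above, shown standalone with made-up values: the semicolon-separated pmids column becomes a list of PMID curies, and an empty column becomes an empty list:

pmids = '1234;5678'
pmid_list = ['PMID:' + pmid for pmid in pmids.split(';')] if pmids != '' else []
assert pmid_list == ['PMID:1234', 'PMID:5678']

empty = ''
assert (['PMID:' + p for p in empty.split(';')] if empty != '' else []) == []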
18,345
|
monarch-initiative/dipper
|
dipper/sources/EBIGene2Phen.py
|
EBIGene2Phen._build_gene_disease_model
|
def _build_gene_disease_model(
self,
gene_id,
relation_id,
disease_id,
variant_label,
consequence_predicate=None,
consequence_id=None,
allelic_requirement=None,
pmids=None):
"""
Builds gene variant disease model
:return: None
"""
model = Model(self.graph)
geno = Genotype(self.graph)
pmids = [] if pmids is None else pmids
is_variant = False
variant_or_gene = gene_id
variant_id_string = variant_label
variant_bnode = self.make_id(variant_id_string, "_")
if consequence_predicate is not None \
and consequence_id is not None:
is_variant = True
model.addTriple(variant_bnode,
consequence_predicate,
consequence_id)
# Hack to add labels to terms that
# don't exist in an ontology
if consequence_id.startswith(':'):
model.addLabel(consequence_id,
consequence_id.strip(':').replace('_', ' '))
if is_variant:
variant_or_gene = variant_bnode
# Typically we would type the variant using the
# molecular consequence, but these are not specific
# enough for us to make mappings (see translation table)
model.addIndividualToGraph(variant_bnode,
variant_label,
self.globaltt['variant_locus'])
geno.addAffectedLocus(variant_bnode, gene_id)
model.addBlankNodeAnnotation(variant_bnode)
assoc = G2PAssoc(
self.graph, self.name, variant_or_gene, disease_id, relation_id)
assoc.source = pmids
assoc.add_association_to_graph()
if allelic_requirement is not None and is_variant is False:
model.addTriple(
assoc.assoc_id, self.globaltt['has_allelic_requirement'],
allelic_requirement)
if allelic_requirement.startswith(':'):
model.addLabel(
allelic_requirement,
allelic_requirement.strip(':').replace('_', ' '))
|
python
|
def _build_gene_disease_model(
self,
gene_id,
relation_id,
disease_id,
variant_label,
consequence_predicate=None,
consequence_id=None,
allelic_requirement=None,
pmids=None):
"""
Builds gene variant disease model
:return: None
"""
model = Model(self.graph)
geno = Genotype(self.graph)
pmids = [] if pmids is None else pmids
is_variant = False
variant_or_gene = gene_id
variant_id_string = variant_label
variant_bnode = self.make_id(variant_id_string, "_")
if consequence_predicate is not None \
and consequence_id is not None:
is_variant = True
model.addTriple(variant_bnode,
consequence_predicate,
consequence_id)
# Hack to add labels to terms that
# don't exist in an ontology
if consequence_id.startswith(':'):
model.addLabel(consequence_id,
consequence_id.strip(':').replace('_', ' '))
if is_variant:
variant_or_gene = variant_bnode
# Typically we would type the variant using the
# molecular consequence, but these are not specific
# enough for us to make mappings (see translation table)
model.addIndividualToGraph(variant_bnode,
variant_label,
self.globaltt['variant_locus'])
geno.addAffectedLocus(variant_bnode, gene_id)
model.addBlankNodeAnnotation(variant_bnode)
assoc = G2PAssoc(
self.graph, self.name, variant_or_gene, disease_id, relation_id)
assoc.source = pmids
assoc.add_association_to_graph()
if allelic_requirement is not None and is_variant is False:
model.addTriple(
assoc.assoc_id, self.globaltt['has_allelic_requirement'],
allelic_requirement)
if allelic_requirement.startswith(':'):
model.addLabel(
allelic_requirement,
allelic_requirement.strip(':').replace('_', ' '))
|
[
"def",
"_build_gene_disease_model",
"(",
"self",
",",
"gene_id",
",",
"relation_id",
",",
"disease_id",
",",
"variant_label",
",",
"consequence_predicate",
"=",
"None",
",",
"consequence_id",
"=",
"None",
",",
"allelic_requirement",
"=",
"None",
",",
"pmids",
"=",
"None",
")",
":",
"model",
"=",
"Model",
"(",
"self",
".",
"graph",
")",
"geno",
"=",
"Genotype",
"(",
"self",
".",
"graph",
")",
"pmids",
"=",
"[",
"]",
"if",
"pmids",
"is",
"None",
"else",
"pmids",
"is_variant",
"=",
"False",
"variant_or_gene",
"=",
"gene_id",
"variant_id_string",
"=",
"variant_label",
"variant_bnode",
"=",
"self",
".",
"make_id",
"(",
"variant_id_string",
",",
"\"_\"",
")",
"if",
"consequence_predicate",
"is",
"not",
"None",
"and",
"consequence_id",
"is",
"not",
"None",
":",
"is_variant",
"=",
"True",
"model",
".",
"addTriple",
"(",
"variant_bnode",
",",
"consequence_predicate",
",",
"consequence_id",
")",
"# Hack to add labels to terms that",
"# don't exist in an ontology",
"if",
"consequence_id",
".",
"startswith",
"(",
"':'",
")",
":",
"model",
".",
"addLabel",
"(",
"consequence_id",
",",
"consequence_id",
".",
"strip",
"(",
"':'",
")",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
")",
"if",
"is_variant",
":",
"variant_or_gene",
"=",
"variant_bnode",
"# Typically we would type the variant using the",
"# molecular consequence, but these are not specific",
"# enough for us to make mappings (see translation table)",
"model",
".",
"addIndividualToGraph",
"(",
"variant_bnode",
",",
"variant_label",
",",
"self",
".",
"globaltt",
"[",
"'variant_locus'",
"]",
")",
"geno",
".",
"addAffectedLocus",
"(",
"variant_bnode",
",",
"gene_id",
")",
"model",
".",
"addBlankNodeAnnotation",
"(",
"variant_bnode",
")",
"assoc",
"=",
"G2PAssoc",
"(",
"self",
".",
"graph",
",",
"self",
".",
"name",
",",
"variant_or_gene",
",",
"disease_id",
",",
"relation_id",
")",
"assoc",
".",
"source",
"=",
"pmids",
"assoc",
".",
"add_association_to_graph",
"(",
")",
"if",
"allelic_requirement",
"is",
"not",
"None",
"and",
"is_variant",
"is",
"False",
":",
"model",
".",
"addTriple",
"(",
"assoc",
".",
"assoc_id",
",",
"self",
".",
"globaltt",
"[",
"'has_allelic_requirement'",
"]",
",",
"allelic_requirement",
")",
"if",
"allelic_requirement",
".",
"startswith",
"(",
"':'",
")",
":",
"model",
".",
"addLabel",
"(",
"allelic_requirement",
",",
"allelic_requirement",
".",
"strip",
"(",
"':'",
")",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
")"
] |
Builds gene variant disease model
:return: None
|
[
"Builds",
"gene",
"variant",
"disease",
"model"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L213-L274
|
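The variant node above is a blank node minted from the variant label via self.make_id(variant_id_string, "_"). A minimal sketch of deriving a stable blank-node id from a label; make_bnode_id is a hypothetical stand-in, not dipper's actual make_id implementation:

import hashlib

def make_bnode_id(label, prefix='_'):
    # Hashing the label makes the id deterministic: the same variant label
    # always maps to the same blank node across runs.
    digest = hashlib.md5(label.encode('utf-8')).hexdigest()
    return '{}:b{}'.format(prefix, digest)

print(make_bnode_id('missense variant of FICTGENE1'))  # label is illustrative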
18,346
|
monarch-initiative/dipper
|
dipper/sources/BioGrid.py
|
BioGrid._get_identifiers
|
def _get_identifiers(self, limit):
"""
This will process the id mapping file provided by Biogrid.
The file has a very large header, which we scan past,
then pull the identifiers, and make equivalence axioms
:param limit:
:return:
"""
LOG.info("getting identifier mapping")
line_counter = 0
f = '/'.join((self.rawdir, self.files['identifiers']['file']))
myzip = ZipFile(f, 'r')
# assume that the first entry is the item
fname = myzip.namelist()[0]
foundheader = False
# TODO align this species filter with the one above
# speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,
# Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')
speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
with myzip.open(fname, 'r') as csvfile:
for line in csvfile:
# skip header lines
if not foundheader:
if re.match(r'BIOGRID_ID', line.decode()):
foundheader = True
continue
line = line.decode().strip()
# BIOGRID_ID
# IDENTIFIER_VALUE
# IDENTIFIER_TYPE
# ORGANISM_OFFICIAL_NAME
# 1 814566 ENTREZ_GENE Arabidopsis thaliana
(biogrid_num, id_num, id_type,
organism_label) = line.split('\t')
if self.test_mode:
graph = self.testgraph
# skip any genes that don't match our test set
if int(biogrid_num) not in self.biogrid_ids:
continue
else:
graph = self.graph
model = Model(graph)
# for each one of these,
# create the node and add equivalent classes
biogrid_id = 'BIOGRID:'+biogrid_num
prefix = self.localtt[id_type]
# TODO make these filters available as commandline options
# geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,
# WormBase,XenBase,ENSEMBL,miRBase'.split(',')
geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')
# proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'
if (speciesfilters is not None) \
and (organism_label.strip() in speciesfilters):
line_counter += 1
if (geneidtypefilters is not None) \
and (prefix in geneidtypefilters):
mapped_id = ':'.join((prefix, id_num))
model.addEquivalentClass(biogrid_id, mapped_id)
# this symbol will only get attached to the biogrid class
elif id_type == 'OFFICIAL_SYMBOL':
model.addClassToGraph(biogrid_id, id_num)
# elif (id_type == 'SYNONYM'):
# FIXME - i am not sure these are synonyms, altids?
# gu.addSynonym(g,biogrid_id,id_num)
if not self.test_mode and limit is not None and line_counter > limit:
break
myzip.close()
return
|
python
|
def _get_identifiers(self, limit):
"""
This will process the id mapping file provided by Biogrid.
The file has a very large header, which we scan past,
then pull the identifiers, and make equivalence axioms
:param limit:
:return:
"""
LOG.info("getting identifier mapping")
line_counter = 0
f = '/'.join((self.rawdir, self.files['identifiers']['file']))
myzip = ZipFile(f, 'r')
# assume that the first entry is the item
fname = myzip.namelist()[0]
foundheader = False
# TODO align this species filter with the one above
# speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,
# Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')
speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
with myzip.open(fname, 'r') as csvfile:
for line in csvfile:
# skip header lines
if not foundheader:
if re.match(r'BIOGRID_ID', line.decode()):
foundheader = True
continue
line = line.decode().strip()
# BIOGRID_ID
# IDENTIFIER_VALUE
# IDENTIFIER_TYPE
# ORGANISM_OFFICIAL_NAME
# 1 814566 ENTREZ_GENE Arabidopsis thaliana
(biogrid_num, id_num, id_type,
organism_label) = line.split('\t')
if self.test_mode:
graph = self.testgraph
# skip any genes that don't match our test set
if int(biogrid_num) not in self.biogrid_ids:
continue
else:
graph = self.graph
model = Model(graph)
# for each one of these,
# create the node and add equivalent classes
biogrid_id = 'BIOGRID:'+biogrid_num
prefix = self.localtt[id_type]
# TODO make these filters available as commandline options
# geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,
# WormBase,XenBase,ENSEMBL,miRBase'.split(',')
geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')
# proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'
if (speciesfilters is not None) \
and (organism_label.strip() in speciesfilters):
line_counter += 1
if (geneidtypefilters is not None) \
and (prefix in geneidtypefilters):
mapped_id = ':'.join((prefix, id_num))
model.addEquivalentClass(biogrid_id, mapped_id)
# this symbol will only get attached to the biogrid class
elif id_type == 'OFFICIAL_SYMBOL':
model.addClassToGraph(biogrid_id, id_num)
# elif (id_type == 'SYNONYM'):
# FIXME - i am not sure these are synonyms, altids?
# gu.addSynonym(g,biogrid_id,id_num)
if not self.test_mode and limit is not None and line_counter > limit:
break
myzip.close()
return
|
[
"def",
"_get_identifiers",
"(",
"self",
",",
"limit",
")",
":",
"LOG",
".",
"info",
"(",
"\"getting identifier mapping\"",
")",
"line_counter",
"=",
"0",
"f",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'identifiers'",
"]",
"[",
"'file'",
"]",
")",
")",
"myzip",
"=",
"ZipFile",
"(",
"f",
",",
"'r'",
")",
"# assume that the first entry is the item",
"fname",
"=",
"myzip",
".",
"namelist",
"(",
")",
"[",
"0",
"]",
"foundheader",
"=",
"False",
"# TODO align this species filter with the one above",
"# speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,",
"# Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')",
"speciesfilters",
"=",
"'Homo sapiens,Mus musculus'",
".",
"split",
"(",
"','",
")",
"with",
"myzip",
".",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"csvfile",
":",
"for",
"line",
"in",
"csvfile",
":",
"# skip header lines",
"if",
"not",
"foundheader",
":",
"if",
"re",
".",
"match",
"(",
"r'BIOGRID_ID'",
",",
"line",
".",
"decode",
"(",
")",
")",
":",
"foundheader",
"=",
"True",
"continue",
"line",
"=",
"line",
".",
"decode",
"(",
")",
".",
"strip",
"(",
")",
"# BIOGRID_ID",
"# IDENTIFIER_VALUE",
"# IDENTIFIER_TYPE",
"# ORGANISM_OFFICIAL_NAME",
"# 1\t814566\tENTREZ_GENE\tArabidopsis thaliana",
"(",
"biogrid_num",
",",
"id_num",
",",
"id_type",
",",
"organism_label",
")",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"# skip any genes that don't match our test set",
"if",
"int",
"(",
"biogrid_num",
")",
"not",
"in",
"self",
".",
"biogrid_ids",
":",
"continue",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"# for each one of these,",
"# create the node and add equivalent classes",
"biogrid_id",
"=",
"'BIOGRID:'",
"+",
"biogrid_num",
"prefix",
"=",
"self",
".",
"localtt",
"[",
"id_type",
"]",
"# TODO make these filters available as commandline options",
"# geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,",
"# WormBase,XenBase,ENSEMBL,miRBase'.split(',')",
"geneidtypefilters",
"=",
"'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'",
".",
"split",
"(",
"','",
")",
"# proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'",
"if",
"(",
"speciesfilters",
"is",
"not",
"None",
")",
"and",
"(",
"organism_label",
".",
"strip",
"(",
")",
"in",
"speciesfilters",
")",
":",
"line_counter",
"+=",
"1",
"if",
"(",
"geneidtypefilters",
"is",
"not",
"None",
")",
"and",
"(",
"prefix",
"in",
"geneidtypefilters",
")",
":",
"mapped_id",
"=",
"':'",
".",
"join",
"(",
"(",
"prefix",
",",
"id_num",
")",
")",
"model",
".",
"addEquivalentClass",
"(",
"biogrid_id",
",",
"mapped_id",
")",
"# this symbol will only get attached to the biogrid class",
"elif",
"id_type",
"==",
"'OFFICIAL_SYMBOL'",
":",
"model",
".",
"addClassToGraph",
"(",
"biogrid_id",
",",
"id_num",
")",
"# elif (id_type == 'SYNONYM'):",
"# FIXME - i am not sure these are synonyms, altids?",
"# gu.addSynonym(g,biogrid_id,id_num)",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"myzip",
".",
"close",
"(",
")",
"return"
] |
This will process the id mapping file provided by Biogrid.
The file has a very large header, which we scan past,
then pull the identifiers, and make equivalence axioms
:param limit:
:return:
|
[
"This",
"will",
"process",
"the",
"id",
"mapping",
"file",
"provided",
"by",
"Biogrid",
".",
"The",
"file",
"has",
"a",
"very",
"large",
"header",
"which",
"we",
"scan",
"past",
"then",
"pull",
"the",
"identifiers",
"and",
"make",
"equivalence",
"axioms"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/BioGrid.py#L201-L281
|
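A stdlib-only sketch of the loop above: take the first member of a zip archive, scan past the oversized header, then split the tab-separated identifier rows and filter by organism. The in-memory archive and its rows are fabricated so the example runs on its own:

import io
import re
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('identifiers.tab',
                'free-form preamble line\n'
                'BIOGRID_ID\tIDENTIFIER_VALUE\tIDENTIFIER_TYPE\tORGANISM_OFFICIAL_NAME\n'
                '1\t814566\tENTREZ_GENE\tArabidopsis thaliana\n'
                '2\t6416\tENTREZ_GENE\tHomo sapiens\n')

speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
with zipfile.ZipFile(buf) as myzip:
    fname = myzip.namelist()[0]  # assume the first entry is the table
    foundheader = False
    with myzip.open(fname) as tabfile:
        for raw in tabfile:
            line = raw.decode().strip()
            if not foundheader:
                # everything up to and including the header row is skipped
                foundheader = bool(re.match(r'BIOGRID_ID', line))
                continue
            biogrid_num, id_num, id_type, organism = line.split('\t')
            if organism in speciesfilters:
                print('BIOGRID:' + biogrid_num, id_type, id_num)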
18,347
|
monarch-initiative/dipper
|
dipper/models/Evidence.py
|
Evidence.add_supporting_evidence
|
def add_supporting_evidence(self, evidence_line, evidence_type=None, label=None):
"""
Add supporting line of evidence node to association id
:param evidence_line: curie or iri, evidence line
:param evidence_type: curie or iri, evidence type if available
:return: None
"""
self.graph.addTriple(
self.association, self.globaltt['has_supporting_evidence_line'],
evidence_line)
if evidence_type is not None:
self.model.addIndividualToGraph(evidence_line, label, evidence_type)
return
|
python
|
def add_supporting_evidence(self, evidence_line, evidence_type=None, label=None):
"""
Add supporting line of evidence node to association id
:param evidence_line: curie or iri, evidence line
:param evidence_type: curie or iri, evidence type if available
:return: None
"""
self.graph.addTriple(
self.association, self.globaltt['has_supporting_evidence_line'],
evidence_line)
if evidence_type is not None:
self.model.addIndividualToGraph(evidence_line, label, evidence_type)
return
|
[
"def",
"add_supporting_evidence",
"(",
"self",
",",
"evidence_line",
",",
"evidence_type",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"self",
".",
"association",
",",
"self",
".",
"globaltt",
"[",
"'has_supporting_evidence_line'",
"]",
",",
"evidence_line",
")",
"if",
"evidence_type",
"is",
"not",
"None",
":",
"self",
".",
"model",
".",
"addIndividualToGraph",
"(",
"evidence_line",
",",
"label",
",",
"evidence_type",
")",
"return"
] |
Add supporting line of evidence node to association id
:param evidence_line: curie or iri, evidence line
:param evidence_type: curie or iri, evidence type if available
:return: None
|
[
"Add",
"supporting",
"line",
"of",
"evidence",
"node",
"to",
"association",
"id"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Evidence.py#L34-L47
|
18,348
|
monarch-initiative/dipper
|
dipper/models/assoc/G2PAssoc.py
|
G2PAssoc.add_association_to_graph
|
def add_association_to_graph(self):
"""
Overrides Association by including bnode support
The reified relationship between a genotype (or any genotype part)
and a phenotype is decorated with some provenance information.
This makes the assumption that
both the genotype and phenotype are classes.
currently hardcoded to map the annotation to the monarch namespace
:param g:
:return:
"""
Assoc.add_association_to_graph(self)
# make a blank stage
if self.start_stage_id or self.end_stage_id is not None:
stage_process_id = '-'.join((str(self.start_stage_id),
str(self.end_stage_id)))
stage_process_id = '_:'+re.sub(r':', '', stage_process_id)
self.model.addIndividualToGraph(
stage_process_id, None, self.globaltt['developmental_process'])
self.graph.addTriple(
stage_process_id, self.globaltt['starts during'], self.start_stage_id)
self.graph.addTriple(
stage_process_id, self.globaltt['ends during'], self.end_stage_id)
self.stage_process_id = stage_process_id
self.graph.addTriple(
self.assoc_id, self.globaltt['has_qualifier'], self.stage_process_id)
if self.environment_id is not None:
self.graph.addTriple(
self.assoc_id, self.globaltt['has_qualifier'], self.environment_id)
return
|
python
|
def add_association_to_graph(self):
"""
Overrides Association by including bnode support
The reified relationship between a genotype (or any genotype part)
and a phenotype is decorated with some provenance information.
This makes the assumption that
both the genotype and phenotype are classes.
currently hardcoded to map the annotation to the monarch namespace
:param g:
:return:
"""
Assoc.add_association_to_graph(self)
# make a blank stage
if self.start_stage_id or self.end_stage_id is not None:
stage_process_id = '-'.join((str(self.start_stage_id),
str(self.end_stage_id)))
stage_process_id = '_:'+re.sub(r':', '', stage_process_id)
self.model.addIndividualToGraph(
stage_process_id, None, self.globaltt['developmental_process'])
self.graph.addTriple(
stage_process_id, self.globaltt['starts during'], self.start_stage_id)
self.graph.addTriple(
stage_process_id, self.globaltt['ends during'], self.end_stage_id)
self.stage_process_id = stage_process_id
self.graph.addTriple(
self.assoc_id, self.globaltt['has_qualifier'], self.stage_process_id)
if self.environment_id is not None:
self.graph.addTriple(
self.assoc_id, self.globaltt['has_qualifier'], self.environment_id)
return
|
[
"def",
"add_association_to_graph",
"(",
"self",
")",
":",
"Assoc",
".",
"add_association_to_graph",
"(",
"self",
")",
"# make a blank stage",
"if",
"self",
".",
"start_stage_id",
"or",
"self",
".",
"end_stage_id",
"is",
"not",
"None",
":",
"stage_process_id",
"=",
"'-'",
".",
"join",
"(",
"(",
"str",
"(",
"self",
".",
"start_stage_id",
")",
",",
"str",
"(",
"self",
".",
"end_stage_id",
")",
")",
")",
"stage_process_id",
"=",
"'_:'",
"+",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"stage_process_id",
")",
"self",
".",
"model",
".",
"addIndividualToGraph",
"(",
"stage_process_id",
",",
"None",
",",
"self",
".",
"globaltt",
"[",
"'developmental_process'",
"]",
")",
"self",
".",
"graph",
".",
"addTriple",
"(",
"stage_process_id",
",",
"self",
".",
"globaltt",
"[",
"'starts during'",
"]",
",",
"self",
".",
"start_stage_id",
")",
"self",
".",
"graph",
".",
"addTriple",
"(",
"stage_process_id",
",",
"self",
".",
"globaltt",
"[",
"'ends during'",
"]",
",",
"self",
".",
"end_stage_id",
")",
"self",
".",
"stage_process_id",
"=",
"stage_process_id",
"self",
".",
"graph",
".",
"addTriple",
"(",
"self",
".",
"assoc_id",
",",
"self",
".",
"globaltt",
"[",
"'has_qualifier'",
"]",
",",
"self",
".",
"stage_process_id",
")",
"if",
"self",
".",
"environment_id",
"is",
"not",
"None",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"self",
".",
"assoc_id",
",",
"self",
".",
"globaltt",
"[",
"'has_qualifier'",
"]",
",",
"self",
".",
"environment_id",
")",
"return"
] |
Overrides Association by including bnode support
The reified relationship between a genotype (or any genotype part)
and a phenotype is decorated with some provenance information.
This makes the assumption that
both the genotype and phenotype are classes.
currently hardcoded to map the annotation to the monarch namespace
:param g:
:return:
|
[
"Overrides",
"Association",
"by",
"including",
"bnode",
"support"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/G2PAssoc.py#L66-L103
|
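One subtlety in the stage branch above: the condition "if self.start_stage_id or self.end_stage_id is not None:" parses as start or (end is not None), because "is not" binds tighter than "or". For string-or-None ids that happens to coincide with "either id is present", but the explicit spelling is safer. A self-contained demonstration:

def as_written(start, end):
    # Exactly the original condition: 'is not' binds tighter than 'or',
    # so this evaluates start or (end is not None).
    return bool(start or end is not None)

def explicit(start, end):
    # The unambiguous spelling of "either stage id is present".
    return start is not None or end is not None

for start, end in [(None, None), ('FBdv:00005289', None),
                   (None, 'FBdv:00005336'), ('', None)]:
    print((start, end), as_written(start, end), explicit(start, end))
# The two agree on the first three cases; ('', None) diverges:
# as_written -> False, explicit -> True.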
18,349
|
monarch-initiative/dipper
|
dipper/sources/MPD.py
|
MPD.parse
|
def parse(self, limit=None):
"""
MPD data is delivered in four separate csv files and one xml file,
which we process iteratively and write out as
one large graph.
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
self._process_straininfo(limit)
# the following will provide us the hash-lookups
# These must be processed in a specific order
# mapping between assays and ontology terms
self._process_ontology_mappings_file(limit)
# this is the metadata about the measurements
self._process_measurements_file(limit)
# get all the measurements per strain
self._process_strainmeans_file(limit)
# The following will use the hash populated above
# to lookup the ids when filling in the graph
self._fill_provenance_graph(limit)
LOG.info("Finished parsing.")
return
|
python
|
def parse(self, limit=None):
"""
MPD data is delivered in four separate csv files and one xml file,
which we process iteratively and write out as
one large graph.
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
self._process_straininfo(limit)
# the following will provide us the hash-lookups
# These must be processed in a specific order
# mapping between assays and ontology terms
self._process_ontology_mappings_file(limit)
# this is the metadata about the measurements
self._process_measurements_file(limit)
# get all the measurements per strain
self._process_strainmeans_file(limit)
# The following will use the hash populated above
# to lookup the ids when filling in the graph
self._fill_provenance_graph(limit)
LOG.info("Finished parsing.")
return
|
[
"def",
"parse",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
":",
"LOG",
".",
"info",
"(",
"\"Only parsing first %s rows fo each file\"",
",",
"str",
"(",
"limit",
")",
")",
"LOG",
".",
"info",
"(",
"\"Parsing files...\"",
")",
"self",
".",
"_process_straininfo",
"(",
"limit",
")",
"# the following will provide us the hash-lookups",
"# These must be processed in a specific order",
"# mapping between assays and ontology terms",
"self",
".",
"_process_ontology_mappings_file",
"(",
"limit",
")",
"# this is the metadata about the measurements",
"self",
".",
"_process_measurements_file",
"(",
"limit",
")",
"# get all the measurements per strain",
"self",
".",
"_process_strainmeans_file",
"(",
"limit",
")",
"# The following will use the hash populated above",
"# to lookup the ids when filling in the graph",
"self",
".",
"_fill_provenance_graph",
"(",
"limit",
")",
"LOG",
".",
"info",
"(",
"\"Finished parsing.\"",
")",
"return"
] |
MPD data is delivered in four separate csv files and one xml file,
which we process iteratively and write out as
one large graph.
:param limit:
:return:
|
[
"MPD",
"data",
"is",
"delivered",
"in",
"four",
"separate",
"csv",
"files",
"and",
"one",
"xml",
"file",
"which",
"we",
"process",
"iteratively",
"and",
"write",
"out",
"as",
"one",
"large",
"graph",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MPD.py#L112-L142
|
18,350
|
monarch-initiative/dipper
|
dipper/sources/MPD.py
|
MPD._add_g2p_assoc
|
def _add_g2p_assoc(self, graph, strain_id, sex, assay_id, phenotypes, comment):
"""
Create an association between a sex-specific strain id
and each of the phenotypes.
Here, we create a genotype from the strain,
and a sex-specific genotype.
Each of those genotypes is created as an anonymous node.
The evidence code is hardcoded to be:
ECO:experimental_phenotypic_evidence.
:param g:
:param strain_id:
:param sex:
:param assay_id:
:param phenotypes: a list of phenotypes to associate with the strain
:param comment:
:return:
"""
geno = Genotype(graph)
model = Model(graph)
eco_id = self.globaltt['experimental phenotypic evidence']
strain_label = self.idlabel_hash.get(strain_id)
# strain genotype
genotype_id = '_:'+'-'.join((re.sub(r':', '', strain_id), 'genotype'))
genotype_label = '[' + strain_label + ']'
sex_specific_genotype_id = '_:'+'-'.join((
re.sub(r':', '', strain_id), sex, 'genotype'))
if strain_label is not None:
sex_specific_genotype_label = strain_label + ' (' + sex + ')'
else:
sex_specific_genotype_label = strain_id + '(' + sex + ')'
genotype_type = self.globaltt['sex_qualified_genotype']
if sex == 'm':
genotype_type = self.globaltt['male_genotype']
elif sex == 'f':
genotype_type = self.globaltt['female_genotype']
# add the genotype to strain connection
geno.addGenotype(
genotype_id, genotype_label,
self.globaltt['genomic_background'])
graph.addTriple(
strain_id, self.globaltt['has_genotype'], genotype_id)
geno.addGenotype(
sex_specific_genotype_id, sex_specific_genotype_label,
genotype_type)
# add the strain as the background for the genotype
graph.addTriple(
sex_specific_genotype_id,
self.globaltt['has_sex_agnostic_part'],
genotype_id)
# ############# BUILD THE G2P ASSOC #############
# TODO add more provenance info when that model is completed
if phenotypes is not None:
for phenotype_id in phenotypes:
assoc = G2PAssoc(
graph, self.name, sex_specific_genotype_id, phenotype_id)
assoc.add_evidence(assay_id)
assoc.add_evidence(eco_id)
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
model.addComment(assoc_id, comment)
model._addSexSpecificity(assoc_id, self.resolve(sex))
return
|
python
|
def _add_g2p_assoc(self, graph, strain_id, sex, assay_id, phenotypes, comment):
"""
Create an association between a sex-specific strain id
and each of the phenotypes.
Here, we create a genotype from the strain,
and a sex-specific genotype.
Each of those genotypes is created as an anonymous node.
The evidence code is hardcoded to be:
ECO:experimental_phenotypic_evidence.
:param g:
:param strain_id:
:param sex:
:param assay_id:
:param phenotypes: a list of phenotypes to associate with the strain
:param comment:
:return:
"""
geno = Genotype(graph)
model = Model(graph)
eco_id = self.globaltt['experimental phenotypic evidence']
strain_label = self.idlabel_hash.get(strain_id)
# strain genotype
genotype_id = '_:'+'-'.join((re.sub(r':', '', strain_id), 'genotype'))
genotype_label = '[' + strain_label + ']'
sex_specific_genotype_id = '_:'+'-'.join((
re.sub(r':', '', strain_id), sex, 'genotype'))
if strain_label is not None:
sex_specific_genotype_label = strain_label + ' (' + sex + ')'
else:
sex_specific_genotype_label = strain_id + '(' + sex + ')'
genotype_type = self.globaltt['sex_qualified_genotype']
if sex == 'm':
genotype_type = self.globaltt['male_genotype']
elif sex == 'f':
genotype_type = self.globaltt['female_genotype']
# add the genotype to strain connection
geno.addGenotype(
genotype_id, genotype_label,
self.globaltt['genomic_background'])
graph.addTriple(
strain_id, self.globaltt['has_genotype'], genotype_id)
geno.addGenotype(
sex_specific_genotype_id, sex_specific_genotype_label,
genotype_type)
# add the strain as the background for the genotype
graph.addTriple(
sex_specific_genotype_id,
self.globaltt['has_sex_agnostic_part'],
genotype_id)
# ############# BUILD THE G2P ASSOC #############
# TODO add more provenance info when that model is completed
if phenotypes is not None:
for phenotype_id in phenotypes:
assoc = G2PAssoc(
graph, self.name, sex_specific_genotype_id, phenotype_id)
assoc.add_evidence(assay_id)
assoc.add_evidence(eco_id)
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
model.addComment(assoc_id, comment)
model._addSexSpecificity(assoc_id, self.resolve(sex))
return
|
[
"def",
"_add_g2p_assoc",
"(",
"self",
",",
"graph",
",",
"strain_id",
",",
"sex",
",",
"assay_id",
",",
"phenotypes",
",",
"comment",
")",
":",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"model",
"=",
"Model",
"(",
"graph",
")",
"eco_id",
"=",
"self",
".",
"globaltt",
"[",
"'experimental phenotypic evidence'",
"]",
"strain_label",
"=",
"self",
".",
"idlabel_hash",
".",
"get",
"(",
"strain_id",
")",
"# strain genotype",
"genotype_id",
"=",
"'_:'",
"+",
"'-'",
".",
"join",
"(",
"(",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"strain_id",
")",
",",
"'genotype'",
")",
")",
"genotype_label",
"=",
"'['",
"+",
"strain_label",
"+",
"']'",
"sex_specific_genotype_id",
"=",
"'_:'",
"+",
"'-'",
".",
"join",
"(",
"(",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"strain_id",
")",
",",
"sex",
",",
"'genotype'",
")",
")",
"if",
"strain_label",
"is",
"not",
"None",
":",
"sex_specific_genotype_label",
"=",
"strain_label",
"+",
"' ('",
"+",
"sex",
"+",
"')'",
"else",
":",
"sex_specific_genotype_label",
"=",
"strain_id",
"+",
"'('",
"+",
"sex",
"+",
"')'",
"genotype_type",
"=",
"self",
".",
"globaltt",
"[",
"'sex_qualified_genotype'",
"]",
"if",
"sex",
"==",
"'m'",
":",
"genotype_type",
"=",
"self",
".",
"globaltt",
"[",
"'male_genotype'",
"]",
"elif",
"sex",
"==",
"'f'",
":",
"genotype_type",
"=",
"self",
".",
"globaltt",
"[",
"'female_genotype'",
"]",
"# add the genotype to strain connection",
"geno",
".",
"addGenotype",
"(",
"genotype_id",
",",
"genotype_label",
",",
"self",
".",
"globaltt",
"[",
"'genomic_background'",
"]",
")",
"graph",
".",
"addTriple",
"(",
"strain_id",
",",
"self",
".",
"globaltt",
"[",
"'has_genotype'",
"]",
",",
"genotype_id",
")",
"geno",
".",
"addGenotype",
"(",
"sex_specific_genotype_id",
",",
"sex_specific_genotype_label",
",",
"genotype_type",
")",
"# add the strain as the background for the genotype",
"graph",
".",
"addTriple",
"(",
"sex_specific_genotype_id",
",",
"self",
".",
"globaltt",
"[",
"'has_sex_agnostic_part'",
"]",
",",
"genotype_id",
")",
"# ############# BUILD THE G2P ASSOC #############",
"# TODO add more provenance info when that model is completed",
"if",
"phenotypes",
"is",
"not",
"None",
":",
"for",
"phenotype_id",
"in",
"phenotypes",
":",
"assoc",
"=",
"G2PAssoc",
"(",
"graph",
",",
"self",
".",
"name",
",",
"sex_specific_genotype_id",
",",
"phenotype_id",
")",
"assoc",
".",
"add_evidence",
"(",
"assay_id",
")",
"assoc",
".",
"add_evidence",
"(",
"eco_id",
")",
"assoc",
".",
"add_association_to_graph",
"(",
")",
"assoc_id",
"=",
"assoc",
".",
"get_association_id",
"(",
")",
"model",
".",
"addComment",
"(",
"assoc_id",
",",
"comment",
")",
"model",
".",
"_addSexSpecificity",
"(",
"assoc_id",
",",
"self",
".",
"resolve",
"(",
"sex",
")",
")",
"return"
] |
Create an association between a sex-specific strain id
and each of the phenotypes.
Here, we create a genotype from the strain,
and a sex-specific genotype.
Each of those genotypes is created as an anonymous node.
The evidence code is hardcoded to be:
ECO:experimental_phenotypic_evidence.
:param g:
:param strain_id:
:param sex:
:param assay_id:
:param phenotypes: a list of phenotypes to associate with the strain
:param comment:
:return:
|
[
"Create",
"an",
"association",
"between",
"a",
"sex",
"-",
"specific",
"strain",
"id",
"and",
"each",
"of",
"the",
"phenotypes",
".",
"Here",
"we",
"create",
"a",
"genotype",
"from",
"the",
"strain",
"and",
"a",
"sex",
"-",
"specific",
"genotype",
".",
"Each",
"of",
"those",
"genotypes",
"are",
"created",
"as",
"anonymous",
"nodes",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MPD.py#L385-L457
|
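The genotype identifiers above are assembled by stripping the colon from the strain curie and joining the parts with hyphens. A self-contained sketch (strain id and label are illustrative; in the record the label comes from idlabel_hash):

import re

strain_id = 'MPD:qtl-123'   # illustrative
strain_label = 'C57BL/6J'   # illustrative
sex = 'f'

genotype_id = '_:' + '-'.join((re.sub(r':', '', strain_id), 'genotype'))
sex_specific_genotype_id = '_:' + '-'.join(
    (re.sub(r':', '', strain_id), sex, 'genotype'))

if strain_label is not None:
    sex_specific_genotype_label = strain_label + ' (' + sex + ')'
else:
    sex_specific_genotype_label = strain_id + '(' + sex + ')'

print(genotype_id)                 # _:MPDqtl-123-genotype
print(sex_specific_genotype_id)    # _:MPDqtl-123-f-genotype
print(sex_specific_genotype_label) # C57BL/6J (f)

One thing the sketch makes visible: the record computes genotype_label = '[' + strain_label + ']' before the None check, so a missing label would raise a TypeError there rather than reach the fallback branch.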
18,351
|
monarch-initiative/dipper
|
dipper/sources/IMPC.py
|
IMPC.parse
|
def parse(self, limit=None):
"""
IMPC data is delivered in three separate csv files OR
in one integrated file, each with the same file format.
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
# for f in ['impc', 'euro', 'mgd', '3i']:
for f in ['all']:
file = '/'.join((self.rawdir, self.files[f]['file']))
self._process_data(file, limit)
LOG.info("Finished parsing")
return
|
python
|
def parse(self, limit=None):
"""
IMPC data is delivered in three separate csv files OR
in one integrated file, each with the same file format.
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
# for f in ['impc', 'euro', 'mgd', '3i']:
for f in ['all']:
file = '/'.join((self.rawdir, self.files[f]['file']))
self._process_data(file, limit)
LOG.info("Finished parsing")
return
|
[
"def",
"parse",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
":",
"LOG",
".",
"info",
"(",
"\"Only parsing first %s rows fo each file\"",
",",
"str",
"(",
"limit",
")",
")",
"LOG",
".",
"info",
"(",
"\"Parsing files...\"",
")",
"if",
"self",
".",
"test_only",
":",
"self",
".",
"test_mode",
"=",
"True",
"# for f in ['impc', 'euro', 'mgd', '3i']:",
"for",
"f",
"in",
"[",
"'all'",
"]",
":",
"file",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"f",
"]",
"[",
"'file'",
"]",
")",
")",
"self",
".",
"_process_data",
"(",
"file",
",",
"limit",
")",
"LOG",
".",
"info",
"(",
"\"Finished parsing\"",
")",
"return"
] |
IMPC data is delivered in three separate csv files OR
in one integrated file, each with the same file format.
:param limit:
:return:
|
[
"IMPC",
"data",
"is",
"delivered",
"in",
"three",
"separate",
"csv",
"files",
"OR",
"in",
"one",
"integrated",
"file",
"each",
"with",
"the",
"same",
"file",
"format",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/IMPC.py#L119-L143
|
18,352
|
monarch-initiative/dipper
|
dipper/models/Pathway.py
|
Pathway.addGeneToPathway
|
def addGeneToPathway(self, gene_id, pathway_id):
"""
When adding a gene to a pathway, we create an intermediate
'gene product' that is involved in
the pathway, through a blank node.
gene_id RO:has_gene_product _gene_product
_gene_product RO:involved_in pathway_id
:param pathway_id:
:param gene_id:
:return:
"""
gene_product = '_:'+re.sub(r':', '', gene_id) + 'product'
self.model.addIndividualToGraph(
gene_product, None, self.globaltt['gene_product'])
self.graph.addTriple(
gene_id, self.globaltt['has gene product'], gene_product)
self.addComponentToPathway(gene_product, pathway_id)
return
|
python
|
def addGeneToPathway(self, gene_id, pathway_id):
"""
When adding a gene to a pathway, we create an intermediate
'gene product' that is involved in
the pathway, through a blank node.
gene_id RO:has_gene_product _gene_product
_gene_product RO:involved_in pathway_id
:param pathway_id:
:param gene_id:
:return:
"""
gene_product = '_:'+re.sub(r':', '', gene_id) + 'product'
self.model.addIndividualToGraph(
gene_product, None, self.globaltt['gene_product'])
self.graph.addTriple(
gene_id, self.globaltt['has gene product'], gene_product)
self.addComponentToPathway(gene_product, pathway_id)
return
|
[
"def",
"addGeneToPathway",
"(",
"self",
",",
"gene_id",
",",
"pathway_id",
")",
":",
"gene_product",
"=",
"'_:'",
"+",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"gene_id",
")",
"+",
"'product'",
"self",
".",
"model",
".",
"addIndividualToGraph",
"(",
"gene_product",
",",
"None",
",",
"self",
".",
"globaltt",
"[",
"'gene_product'",
"]",
")",
"self",
".",
"graph",
".",
"addTriple",
"(",
"gene_id",
",",
"self",
".",
"globaltt",
"[",
"'has gene product'",
"]",
",",
"gene_product",
")",
"self",
".",
"addComponentToPathway",
"(",
"gene_product",
",",
"pathway_id",
")",
"return"
] |
When adding a gene to a pathway, we create an intermediate
'gene product' that is involved in
the pathway, through a blank node.
gene_id RO:has_gene_product _gene_product
_gene_product RO:involved_in pathway_id
:param pathway_id:
:param gene_id:
:return:
|
[
"When",
"adding",
"a",
"gene",
"to",
"a",
"pathway",
"we",
"create",
"an",
"intermediate",
"gene",
"product",
"that",
"is",
"involved",
"in",
"the",
"pathway",
"through",
"a",
"blank",
"node",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Pathway.py#L50-L71
|
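A stand-alone sketch of the gene-to-pathway pattern above, collecting triples as plain tuples rather than dipper graph calls; the RO curies stand in for the 'has gene product' and 'involved in' predicates the record resolves through globaltt, and are shown here for illustration only:

import re

def gene_to_pathway_triples(gene_id, pathway_id):
    # Mint the intermediate gene-product blank node, as addGeneToPathway does.
    gene_product = '_:' + re.sub(r':', '', gene_id) + 'product'
    return [
        (gene_id, 'RO:0002205', gene_product),     # has gene product
        (gene_product, 'RO:0002331', pathway_id),  # involved in
    ]

for triple in gene_to_pathway_triples('NCBIGene:672', 'KEGG-path:hsa04151'):
    print(triple)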
18,353
|
monarch-initiative/dipper
|
dipper/models/Pathway.py
|
Pathway.addComponentToPathway
|
def addComponentToPathway(self, component_id, pathway_id):
"""
This can be used directly when the component is directly involved in
the pathway. If a transforming event is performed on the component
first, then the addGeneToPathway should be used instead.
:param pathway_id:
:param component_id:
:return:
"""
self.graph.addTriple(component_id, self.globaltt['involved in'], pathway_id)
return
|
python
|
def addComponentToPathway(self, component_id, pathway_id):
"""
This can be used directly when the component is directly involved in
the pathway. If a transforming event is performed on the component
first, then the addGeneToPathway should be used instead.
:param pathway_id:
:param component_id:
:return:
"""
self.graph.addTriple(component_id, self.globaltt['involved in'], pathway_id)
return
|
[
"def",
"addComponentToPathway",
"(",
"self",
",",
"component_id",
",",
"pathway_id",
")",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"component_id",
",",
"self",
".",
"globaltt",
"[",
"'involved in'",
"]",
",",
"pathway_id",
")",
"return"
] |
This can be used directly when the component is directly involved in
the pathway. If a transforming event is performed on the component
first, then the addGeneToPathway should be used instead.
:param pathway_id:
:param component_id:
:return:
|
[
"This",
"can",
"be",
"used",
"directly",
"when",
"the",
"component",
"is",
"directly",
"involved",
"in",
"the",
"pathway",
".",
"If",
"a",
"transforming",
"event",
"is",
"performed",
"on",
"the",
"component",
"first",
"then",
"the",
"addGeneToPathway",
"should",
"be",
"used",
"instead",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Pathway.py#L73-L85
|
18,354
|
monarch-initiative/dipper
|
dipper/sources/Source.py
|
Source.write
|
def write(self, fmt='turtle', stream=None):
"""
This convenience method will write out all of the graphs
associated with the source.
Right now these are hardcoded to be a single "graph"
and a "src_dataset.ttl" and a "src_test.ttl"
If you do not supply stream='stdout'
it will default to writing these to files.
In addition, if the version number isn't yet set in the dataset,
it will be set to the date on file.
:return: None
"""
fmt_ext = {
'rdfxml': 'xml',
'turtle': 'ttl',
'nt': 'nt', # ntriples
'nquads': 'nq',
'n3': 'n3' # notation3
}
# make the regular graph output file
dest = None
if self.name is not None:
dest = '/'.join((self.outdir, self.name))
if fmt in fmt_ext:
dest = '.'.join((dest, fmt_ext.get(fmt)))
else:
dest = '.'.join((dest, fmt))
LOG.info("Setting outfile to %s", dest)
# make the dataset_file name, always format as turtle
self.datasetfile = '/'.join(
(self.outdir, self.name + '_dataset.ttl'))
LOG.info("Setting dataset file to %s", self.datasetfile)
if self.dataset is not None and self.dataset.version is None:
self.dataset.set_version_by_date()
LOG.info("No version for %s setting to date issued.", self.name)
else:
LOG.warning("No output file set. Using stdout")
stream = 'stdout'
gu = GraphUtils(None)
# the _dataset description is always turtle
gu.write(self.dataset.getGraph(), 'turtle', filename=self.datasetfile)
if self.test_mode:
# unless we stop hardcoding, the test dataset is always turtle
LOG.info("Setting testfile to %s", self.testfile)
gu.write(self.testgraph, 'turtle', filename=self.testfile)
# print graph out
if stream is None:
outfile = dest
elif stream.lower().strip() == 'stdout':
outfile = None
else:
LOG.error("I don't understand our stream.")
return
gu.write(self.graph, fmt, filename=outfile)
|
python
|
def write(self, fmt='turtle', stream=None):
"""
This convenience method will write out all of the graphs
associated with the source.
Right now these are hardcoded to be a single "graph"
and a "src_dataset.ttl" and a "src_test.ttl"
If you do not supply stream='stdout'
it will default to writing these to files.
In addition, if the version number isn't yet set in the dataset,
it will be set to the date on file.
:return: None
"""
fmt_ext = {
'rdfxml': 'xml',
'turtle': 'ttl',
'nt': 'nt', # ntriples
'nquads': 'nq',
'n3': 'n3' # notation3
}
# make the regular graph output file
dest = None
if self.name is not None:
dest = '/'.join((self.outdir, self.name))
if fmt in fmt_ext:
dest = '.'.join((dest, fmt_ext.get(fmt)))
else:
dest = '.'.join((dest, fmt))
LOG.info("Setting outfile to %s", dest)
# make the dataset_file name, always format as turtle
self.datasetfile = '/'.join(
(self.outdir, self.name + '_dataset.ttl'))
LOG.info("Setting dataset file to %s", self.datasetfile)
if self.dataset is not None and self.dataset.version is None:
self.dataset.set_version_by_date()
LOG.info("No version for %s setting to date issued.", self.name)
else:
LOG.warning("No output file set. Using stdout")
stream = 'stdout'
gu = GraphUtils(None)
# the _dataset description is always turtle
gu.write(self.dataset.getGraph(), 'turtle', filename=self.datasetfile)
if self.test_mode:
# unless we stop hardcoding, the test dataset is always turtle
LOG.info("Setting testfile to %s", self.testfile)
gu.write(self.testgraph, 'turtle', filename=self.testfile)
# print graph out
if stream is None:
outfile = dest
elif stream.lower().strip() == 'stdout':
outfile = None
else:
LOG.error("I don't understand our stream.")
return
gu.write(self.graph, fmt, filename=outfile)
|
[
"def",
"write",
"(",
"self",
",",
"fmt",
"=",
"'turtle'",
",",
"stream",
"=",
"None",
")",
":",
"fmt_ext",
"=",
"{",
"'rdfxml'",
":",
"'xml'",
",",
"'turtle'",
":",
"'ttl'",
",",
"'nt'",
":",
"'nt'",
",",
"# ntriples",
"'nquads'",
":",
"'nq'",
",",
"'n3'",
":",
"'n3'",
"# notation3",
"}",
"# make the regular graph output file",
"dest",
"=",
"None",
"if",
"self",
".",
"name",
"is",
"not",
"None",
":",
"dest",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"outdir",
",",
"self",
".",
"name",
")",
")",
"if",
"fmt",
"in",
"fmt_ext",
":",
"dest",
"=",
"'.'",
".",
"join",
"(",
"(",
"dest",
",",
"fmt_ext",
".",
"get",
"(",
"fmt",
")",
")",
")",
"else",
":",
"dest",
"=",
"'.'",
".",
"join",
"(",
"(",
"dest",
",",
"fmt",
")",
")",
"LOG",
".",
"info",
"(",
"\"Setting outfile to %s\"",
",",
"dest",
")",
"# make the dataset_file name, always format as turtle",
"self",
".",
"datasetfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"outdir",
",",
"self",
".",
"name",
"+",
"'_dataset.ttl'",
")",
")",
"LOG",
".",
"info",
"(",
"\"Setting dataset file to %s\"",
",",
"self",
".",
"datasetfile",
")",
"if",
"self",
".",
"dataset",
"is",
"not",
"None",
"and",
"self",
".",
"dataset",
".",
"version",
"is",
"None",
":",
"self",
".",
"dataset",
".",
"set_version_by_date",
"(",
")",
"LOG",
".",
"info",
"(",
"\"No version for %s setting to date issued.\"",
",",
"self",
".",
"name",
")",
"else",
":",
"LOG",
".",
"warning",
"(",
"\"No output file set. Using stdout\"",
")",
"stream",
"=",
"'stdout'",
"gu",
"=",
"GraphUtils",
"(",
"None",
")",
"# the _dataset description is always turtle",
"gu",
".",
"write",
"(",
"self",
".",
"dataset",
".",
"getGraph",
"(",
")",
",",
"'turtle'",
",",
"filename",
"=",
"self",
".",
"datasetfile",
")",
"if",
"self",
".",
"test_mode",
":",
"# unless we stop hardcoding, the test dataset is always turtle",
"LOG",
".",
"info",
"(",
"\"Setting testfile to %s\"",
",",
"self",
".",
"testfile",
")",
"gu",
".",
"write",
"(",
"self",
".",
"testgraph",
",",
"'turtle'",
",",
"filename",
"=",
"self",
".",
"testfile",
")",
"# print graph out",
"if",
"stream",
"is",
"None",
":",
"outfile",
"=",
"dest",
"elif",
"stream",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"==",
"'stdout'",
":",
"outfile",
"=",
"None",
"else",
":",
"LOG",
".",
"error",
"(",
"\"I don't understand our stream.\"",
")",
"return",
"gu",
".",
"write",
"(",
"self",
".",
"graph",
",",
"fmt",
",",
"filename",
"=",
"outfile",
")"
] |
This convenience method will write out all of the graphs
associated with the source.
Right now these are hardcoded to be a single "graph"
and a "src_dataset.ttl" and a "src_test.ttl"
If you do not supply stream='stdout'
it will default to writing these to files.
In addition, if the version number isn't yet set in the dataset,
it will be set to the date on file.
:return: None
|
[
"This",
"convenience",
"method",
"will",
"write",
"out",
"all",
"of",
"the",
"graphs",
"associated",
"with",
"the",
"source",
".",
"Right",
"now",
"these",
"are",
"hardcoded",
"to",
"be",
"a",
"single",
"graph",
"and",
"a",
"src_dataset",
".",
"ttl",
"and",
"a",
"src_test",
".",
"ttl",
"If",
"you",
"do",
"not",
"supply",
"stream",
"=",
"stdout",
"it",
"will",
"default",
"write",
"these",
"to",
"files",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L161-L223
|
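A self-contained sketch of the destination-naming logic above: known serialization formats map to their canonical extension, and anything else keeps the format name as the extension (directory and source name are illustrative):

fmt_ext = {
    'rdfxml': 'xml',
    'turtle': 'ttl',
    'nt': 'nt',      # ntriples
    'nquads': 'nq',
    'n3': 'n3',      # notation3
}

def outfile_name(outdir, name, fmt='turtle'):
    dest = '/'.join((outdir, name))
    # Known formats get their canonical extension; unknown ones keep fmt as-is.
    return '.'.join((dest, fmt_ext.get(fmt, fmt)))

print(outfile_name('out', 'biogrid'))             # out/biogrid.ttl
print(outfile_name('out', 'biogrid', 'nquads'))   # out/biogrid.nq
print(outfile_name('out', 'biogrid', 'json-ld'))  # out/biogrid.json-ld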
18,355
|
monarch-initiative/dipper
|
dipper/sources/Source.py
|
Source.declareAsOntology
|
def declareAsOntology(self, graph):
"""
The file we output needs to be declared as an ontology,
including its version information.
TEC: I am not convinced dipper reformatting external data as RDF triples
makes an OWL ontology (nor that it should be considered a goal).
Proper ontologies are built by ontologists. Dipper reformats data
and annotates/decorates it with a minimal set of carefully arranged
terms drawn from multiple proper ontologies.
This allows the whole (dipper's RDF triples and parent ontologies)
to function as a single ontology we can reason over when combined
in a store such as SciGraph.
Including more than the minimal ontological terms in dipper's RDF
output constitutes a liability as it allows greater divergence
between dipper artifacts and the proper ontologies.
Further information will be augmented in the dataset object.
:param version:
:return:
"""
# <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ;
# owl:versionInfo
# <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl>
model = Model(graph)
# is self.outfile suffix set yet???
ontology_file_id = 'MonarchData:' + self.name + ".ttl"
model.addOntologyDeclaration(ontology_file_id)
# add timestamp as version info
cur_time = datetime.now()
t_string = cur_time.strftime("%Y-%m-%d")
ontology_version = t_string
# TEC this means the MonarchArchive IRI needs the release updated
# maybe extract the version info from there
# should not hardcode the suffix as it may change
archive_url = 'MonarchArchive:' + 'ttl/' + self.name + '.ttl'
model.addOWLVersionIRI(ontology_file_id, archive_url)
model.addOWLVersionInfo(ontology_file_id, ontology_version)
|
python
|
def declareAsOntology(self, graph):
"""
The file we output needs to be declared as an ontology,
including its version information.
TEC: I am not convinced dipper reformatting external data as RDF triples
makes an OWL ontology (nor that it should be considered a goal).
Proper ontologies are built by ontologists. Dipper reformats data
and annotates/decorates it with a minimal set of carefully arranged
terms drawn from multiple proper ontologies.
This allows the whole (dipper's RDF triples and parent ontologies)
to function as a single ontology we can reason over when combined
in a store such as SciGraph.
Including more than the minimal ontological terms in dipper's RDF
output constitutes a liability as it allows greater divergence
between dipper artifacts and the proper ontologies.
Further information will be augmented in the dataset object.
:param version:
:return:
"""
# <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ;
# owl:versionInfo
# <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl>
model = Model(graph)
# is self.outfile suffix set yet???
ontology_file_id = 'MonarchData:' + self.name + ".ttl"
model.addOntologyDeclaration(ontology_file_id)
# add timestamp as version info
cur_time = datetime.now()
t_string = cur_time.strftime("%Y-%m-%d")
ontology_version = t_string
# TEC this means the MonarchArchive IRI needs the release updated
# maybe extract the version info from there
# should not hardcode the suffix as it may change
archive_url = 'MonarchArchive:' + 'ttl/' + self.name + '.ttl'
model.addOWLVersionIRI(ontology_file_id, archive_url)
model.addOWLVersionInfo(ontology_file_id, ontology_version)
|
[
"def",
"declareAsOntology",
"(",
"self",
",",
"graph",
")",
":",
"# <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ;",
"# owl:versionInfo",
"# <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl>",
"model",
"=",
"Model",
"(",
"graph",
")",
"# is self.outfile suffix set yet???",
"ontology_file_id",
"=",
"'MonarchData:'",
"+",
"self",
".",
"name",
"+",
"\".ttl\"",
"model",
".",
"addOntologyDeclaration",
"(",
"ontology_file_id",
")",
"# add timestamp as version info",
"cur_time",
"=",
"datetime",
".",
"now",
"(",
")",
"t_string",
"=",
"cur_time",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"ontology_version",
"=",
"t_string",
"# TEC this means the MonarchArchive IRI needs the release updated",
"# maybe extract the version info from there",
"# should not hardcode the suffix as it may change",
"archive_url",
"=",
"'MonarchArchive:'",
"+",
"'ttl/'",
"+",
"self",
".",
"name",
"+",
"'.ttl'",
"model",
".",
"addOWLVersionIRI",
"(",
"ontology_file_id",
",",
"archive_url",
")",
"model",
".",
"addOWLVersionInfo",
"(",
"ontology_file_id",
",",
"ontology_version",
")"
] |
The file we output needs to be declared as an ontology,
including its version information.
TEC: I am not convinced dipper reformatting external data as RDF triples
makes an OWL ontology (nor that it should be considered a goal).
Proper ontologies are built by ontologists. Dipper reformats data
and annotates/decorates it with a minimal set of carefully arranged
terms drawn from multiple proper ontologies.
This allows the whole (dipper's RDF triples and parent ontologies)
to function as a single ontology we can reason over when combined
in a store such as SciGraph.
Including more than the minimal ontological terms in dipper's RDF
output constitutes a liability as it allows greater divergence
between dipper artifacts and the proper ontologies.
Further information will be augmented in the dataset object.
:param version:
:return:
|
[
"The",
"file",
"we",
"output",
"needs",
"to",
"be",
"declared",
"as",
"an",
"ontology",
"including",
"it",
"s",
"version",
"information",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L614-L660
|
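The version stamping above boils down to two CURIEs plus a date string. A minimal sketch (the source name is illustrative; the curie prefixes mirror the record and expand to whatever the curie map defines):

from datetime import datetime

name = 'biogrid'  # illustrative source name
ontology_file_id = 'MonarchData:' + name + '.ttl'
archive_url = 'MonarchArchive:' + 'ttl/' + name + '.ttl'
ontology_version = datetime.now().strftime('%Y-%m-%d')  # date as version info

print(ontology_file_id, archive_url, ontology_version)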
18,356
|
monarch-initiative/dipper
|
dipper/sources/Source.py
|
Source.remove_backslash_r
|
def remove_backslash_r(filename, encoding):
"""
A helpful utility to remove Carriage Return from any file.
This will read a file into memory,
and overwrite the contents of the original file.
TODO: This function may be a liability
:param filename:
:return:
"""
with open(filename, 'r', encoding=encoding, newline=r'\n') as filereader:
contents = filereader.read()
contents = re.sub(r'\r', '', contents)
with open(filename, "w") as filewriter:
filewriter.truncate()
filewriter.write(contents)
|
python
|
def remove_backslash_r(filename, encoding):
"""
A helpful utility to remove Carriage Return from any file.
This will read a file into memory,
and overwrite the contents of the original file.
TODO: This function may be a liability
:param filename:
:return:
"""
with open(filename, 'r', encoding=encoding, newline=r'\n') as filereader:
contents = filereader.read()
contents = re.sub(r'\r', '', contents)
with open(filename, "w") as filewriter:
filewriter.truncate()
filewriter.write(contents)
|
[
"def",
"remove_backslash_r",
"(",
"filename",
",",
"encoding",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
",",
"encoding",
"=",
"encoding",
",",
"newline",
"=",
"r'\\n'",
")",
"as",
"filereader",
":",
"contents",
"=",
"filereader",
".",
"read",
"(",
")",
"contents",
"=",
"re",
".",
"sub",
"(",
"r'\\r'",
",",
"''",
",",
"contents",
")",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"filewriter",
":",
"filewriter",
".",
"truncate",
"(",
")",
"filewriter",
".",
"write",
"(",
"contents",
")"
] |
A helpful utility to remove Carriage Return from any file.
This will read a file into memory,
and overwrite the contents of the original file.
TODO: This function may be a liability
:param filename:
:return:
|
[
"A",
"helpful",
"utility",
"to",
"remove",
"Carriage",
"Return",
"from",
"any",
"file",
".",
"This",
"will",
"read",
"a",
"file",
"into",
"memory",
"and",
"overwrite",
"the",
"contents",
"of",
"the",
"original",
"file",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L664-L683
|
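Note that open(..., newline=r'\n') in the record passes the two-character string backslash-n, which open() rejects with a ValueError (the legal values are None, '', '\n', '\r', and '\r\n'); newline='' is the value that suppresses newline translation so '\r' survives the read. A corrected, self-contained sketch of the same utility:

import re

def remove_carriage_returns(filename, encoding='utf-8'):
    # newline='' disables universal-newline translation, so any '\r'
    # characters reach the regex instead of being normalized away.
    with open(filename, 'r', encoding=encoding, newline='') as reader:
        contents = reader.read()
    contents = re.sub(r'\r', '', contents)
    # mode 'w' truncates on open; the explicit truncate() in the record
    # is redundant.
    with open(filename, 'w', encoding=encoding, newline='') as writer:
        writer.write(contents)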
18,357
|
monarch-initiative/dipper
|
dipper/sources/Source.py
|
Source.load_local_translationtable
|
def load_local_translationtable(self, name):
'''
Load "ingest specific" translation from whatever they called something
to the ontology label we need to map it to.
To facilitate seeing more ontology labels in dipper ingests
a reverse mapping from ontology labels to external strings is also generated
and available as a dict localtcid
'''
localtt_file = 'translationtable/' + name + '.yaml'
try:
with open(localtt_file):
pass
except IOError:
# write a stub file as a place holder if none exists
with open(localtt_file, 'w') as write_yaml:
yaml.dump({name: name}, write_yaml)
finally:
with open(localtt_file, 'r') as read_yaml:
localtt = yaml.safe_load(read_yaml)
# inverse local translation.
# note: keeping this invertible will be work.
# Useful to not litter an ingest with external syntax
self.localtcid = {v: k for k, v in localtt.items()}
return localtt
|
python
|
def load_local_translationtable(self, name):
'''
Load "ingest specific" translation from whatever they called something
to the ontology label we need to map it to.
To facilitate seeing more ontology labels in dipper ingests
a reverse mapping from ontology labels to external strings is also generated
and available as a dict localtcid
'''
localtt_file = 'translationtable/' + name + '.yaml'
try:
with open(localtt_file):
pass
except IOError:
# write a stub file as a place holder if none exists
with open(localtt_file, 'w') as write_yaml:
yaml.dump({name: name}, write_yaml)
finally:
with open(localtt_file, 'r') as read_yaml:
localtt = yaml.safe_load(read_yaml)
# inverse local translation.
# note: keeping this invertible will be work.
# Useful to not litter an ingest with external syntax
self.localtcid = {v: k for k, v in localtt.items()}
return localtt
|
[
"def",
"load_local_translationtable",
"(",
"self",
",",
"name",
")",
":",
"localtt_file",
"=",
"'translationtable/'",
"+",
"name",
"+",
"'.yaml'",
"try",
":",
"with",
"open",
"(",
"localtt_file",
")",
":",
"pass",
"except",
"IOError",
":",
"# write a stub file as a place holder if none exists",
"with",
"open",
"(",
"localtt_file",
",",
"'w'",
")",
"as",
"write_yaml",
":",
"yaml",
".",
"dump",
"(",
"{",
"name",
":",
"name",
"}",
",",
"write_yaml",
")",
"finally",
":",
"with",
"open",
"(",
"localtt_file",
",",
"'r'",
")",
"as",
"read_yaml",
":",
"localtt",
"=",
"yaml",
".",
"safe_load",
"(",
"read_yaml",
")",
"# inverse local translation.",
"# note: keeping this invertable will be work.",
"# Useful to not litter an ingest with external syntax",
"self",
".",
"localtcid",
"=",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"localtt",
".",
"items",
"(",
")",
"}",
"return",
"localtt"
] |
Load "ingest specific" translation from whatever they called something
to the ontology label we need to map it to.
To facilitate seeing more ontology labels in dipper ingests
a reverse mapping from ontology labels to external strings is also generated
and available as a dict localtcid
|
[
"Load",
"ingest",
"specific",
"translation",
"from",
"whatever",
"they",
"called",
"something",
"to",
"the",
"ontology",
"label",
"we",
"need",
"to",
"map",
"it",
"to",
".",
"To",
"facilitate",
"seeing",
"more",
"ontology",
"lables",
"in",
"dipper",
"ingests",
"a",
"reverse",
"mapping",
"from",
"ontology",
"lables",
"to",
"external",
"strings",
"is",
"also",
"generated",
"and",
"available",
"as",
"a",
"dict",
"localtcid"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L739-L767
|
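The stub-then-load pattern above (create a {name: name} YAML file if none exists, then read it back and build the inverse map) is reusable outside dipper. A minimal standalone sketch, assuming PyYAML and a translationtable/ directory that may not yet exist; note the inversion silently collapses duplicate values to a single key:

import os

import yaml  # PyYAML

def load_translation_table(name, base_dir='translationtable'):
    """Ensure a name->label YAML mapping exists, then load it and its inverse."""
    path = os.path.join(base_dir, name + '.yaml')
    if not os.path.exists(path):
        # write a stub file as a placeholder, mirroring the record above
        os.makedirs(base_dir, exist_ok=True)
        with open(path, 'w') as write_yaml:
            yaml.dump({name: name}, write_yaml)
    with open(path) as read_yaml:
        table = yaml.safe_load(read_yaml)
    # inverting assumes the mapping is one-to-one; duplicate values collapse
    inverse = {value: key for key, value in table.items()}
    return table, inverse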
18,358
|
monarch-initiative/dipper
|
dipper/models/Genotype.py
|
Genotype.addGene
|
def addGene(
self, gene_id, gene_label, gene_type=None, gene_description=None
):
''' genes are classes '''
if gene_type is None:
gene_type = self.globaltt['gene']
self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description)
return
|
python
|
def addGene(
self, gene_id, gene_label, gene_type=None, gene_description=None
):
''' genes are classes '''
if gene_type is None:
gene_type = self.globaltt['gene']
self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description)
return
|
[
"def",
"addGene",
"(",
"self",
",",
"gene_id",
",",
"gene_label",
",",
"gene_type",
"=",
"None",
",",
"gene_description",
"=",
"None",
")",
":",
"if",
"gene_type",
"is",
"None",
":",
"gene_type",
"=",
"self",
".",
"globaltt",
"[",
"'gene'",
"]",
"self",
".",
"model",
".",
"addClassToGraph",
"(",
"gene_id",
",",
"gene_label",
",",
"gene_type",
",",
"gene_description",
")",
"return"
] |
genes are classes
|
[
"genes",
"are",
"classes"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Genotype.py#L79-L87
|
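addGene is a thin wrapper, but the None-as-sentinel default (fall back to a type resolved from instance state at call time) is a common idiom worth isolating. A toy stand-in; Catalog and the SO:0000704 default are illustrative assumptions, not dipper's API:

class Catalog:
    DEFAULT_GENE_TYPE = 'SO:0000704'  # 'gene' in the Sequence Ontology; assumed stand-in for globaltt['gene']

    def __init__(self):
        self.entries = {}

    def add_gene(self, gene_id, gene_label, gene_type=None, gene_description=None):
        # None as sentinel: resolve the default at call time, not in the signature
        if gene_type is None:
            gene_type = self.DEFAULT_GENE_TYPE
        self.entries[gene_id] = (gene_label, gene_type, gene_description)

catalog = Catalog()
catalog.add_gene('ZFIN:ZDB-GENE-000000-1', 'example')  # hypothetical identifiers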
18,359
|
monarch-initiative/dipper
|
dipper/utils/DipperUtil.py
|
DipperUtil.get_ncbi_taxon_num_by_label
|
def get_ncbi_taxon_num_by_label(label):
"""
Here we want to look up the NCBI Taxon id using some kind of label.
It will only return a result if there is a unique hit.
:return:
"""
req = {'db': 'taxonomy', 'retmode': 'json', 'term': label}
req.update(EREQ)
request = SESSION.get(ESEARCH, params=req)
LOG.info('fetching: %s', request.url)
request.raise_for_status()
result = request.json()['esearchresult']
# Occasionally eutils returns the json blob
# {'ERROR': 'Invalid db name specified: taxonomy'}
if 'ERROR' in result:
request = SESSION.get(ESEARCH, params=req)
LOG.info('fetching: %s', request.url)
request.raise_for_status()
result = request.json()['esearchresult']
tax_num = None
if 'count' in result and str(result['count']) == '1':
tax_num = result['idlist'][0]
else:
# TODO throw errors
LOG.warning('ESEARCH for taxon label "%s" returns %s', label, str(result))
return tax_num
|
python
|
def get_ncbi_taxon_num_by_label(label):
"""
Here we want to look up the NCBI Taxon id using some kind of label.
It will only return a result if there is a unique hit.
:return:
"""
req = {'db': 'taxonomy', 'retmode': 'json', 'term': label}
req.update(EREQ)
request = SESSION.get(ESEARCH, params=req)
LOG.info('fetching: %s', request.url)
request.raise_for_status()
result = request.json()['esearchresult']
# Occasionally eutils returns the json blob
# {'ERROR': 'Invalid db name specified: taxonomy'}
if 'ERROR' in result:
request = SESSION.get(ESEARCH, params=req)
LOG.info('fetching: %s', request.url)
request.raise_for_status()
result = request.json()['esearchresult']
tax_num = None
if 'count' in result and str(result['count']) == '1':
tax_num = result['idlist'][0]
else:
# TODO throw errors
LOG.warning('ESEARCH for taxon label "%s" returns %s', label, str(result))
return tax_num
|
[
"def",
"get_ncbi_taxon_num_by_label",
"(",
"label",
")",
":",
"req",
"=",
"{",
"'db'",
":",
"'taxonomy'",
",",
"'retmode'",
":",
"'json'",
",",
"'term'",
":",
"label",
"}",
"req",
".",
"update",
"(",
"EREQ",
")",
"request",
"=",
"SESSION",
".",
"get",
"(",
"ESEARCH",
",",
"params",
"=",
"req",
")",
"LOG",
".",
"info",
"(",
"'fetching: %s'",
",",
"request",
".",
"url",
")",
"request",
".",
"raise_for_status",
"(",
")",
"result",
"=",
"request",
".",
"json",
"(",
")",
"[",
"'esearchresult'",
"]",
"# Occasionally eutils returns the json blob",
"# {'ERROR': 'Invalid db name specified: taxonomy'}",
"if",
"'ERROR'",
"in",
"result",
":",
"request",
"=",
"SESSION",
".",
"get",
"(",
"ESEARCH",
",",
"params",
"=",
"req",
")",
"LOG",
".",
"info",
"(",
"'fetching: %s'",
",",
"request",
".",
"url",
")",
"request",
".",
"raise_for_status",
"(",
")",
"result",
"=",
"request",
".",
"json",
"(",
")",
"[",
"'esearchresult'",
"]",
"tax_num",
"=",
"None",
"if",
"'count'",
"in",
"result",
"and",
"str",
"(",
"result",
"[",
"'count'",
"]",
")",
"==",
"'1'",
":",
"tax_num",
"=",
"result",
"[",
"'idlist'",
"]",
"[",
"0",
"]",
"else",
":",
"# TODO throw errors",
"LOG",
".",
"warning",
"(",
"'ESEARCH for taxon label \"%s\" returns %s'",
",",
"label",
",",
"str",
"(",
"result",
")",
")",
"return",
"tax_num"
] |
Here we want to look up the NCBI Taxon id using some kind of label.
It will only return a result if there is a unique hit.
:return:
|
[
"Here",
"we",
"want",
"to",
"look",
"up",
"the",
"NCBI",
"Taxon",
"id",
"using",
"some",
"kind",
"of",
"label",
".",
"It",
"will",
"only",
"return",
"a",
"result",
"if",
"there",
"is",
"a",
"unique",
"hit",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/DipperUtil.py#L47-L78
|
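The function above boils down to: query NCBI esearch, retry once on the transient ERROR blob, and accept only a unique hit. A trimmed sketch using requests, with the real eutils esearch endpoint but EREQ (the shared tool/credential parameters) and logging omitted:

import requests

ESEARCH = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'

def taxon_id_for_label(label, session=None):
    """Return the NCBI taxonomy id for a label, or None unless exactly one hit."""
    session = session or requests.Session()
    params = {'db': 'taxonomy', 'retmode': 'json', 'term': label}
    response = session.get(ESEARCH, params=params)
    response.raise_for_status()
    result = response.json()['esearchresult']
    if 'ERROR' in result:
        # eutils occasionally returns a transient error blob; retry once blindly
        response = session.get(ESEARCH, params=params)
        response.raise_for_status()
        result = response.json()['esearchresult']
    if str(result.get('count')) == '1':
        return result['idlist'][0]
    return None  # ambiguous or missing; the record logs a warning here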
18,360
|
monarch-initiative/dipper
|
dipper/models/assoc/Association.py
|
Assoc.set_association_id
|
def set_association_id(self, assoc_id=None):
"""
This will set the association ID based on the internal parts
of the association.
To be used in cases where an external association identifier
should be used.
:param assoc_id:
:return:
"""
if assoc_id is None:
self.assoc_id = self.make_association_id(
self.definedby, self.sub, self.rel, self.obj)
else:
self.assoc_id = assoc_id
return self.assoc_id
|
python
|
def set_association_id(self, assoc_id=None):
"""
This will set the association ID based on the internal parts
of the association.
To be used in cases where an external association identifier
should be used.
:param assoc_id:
:return:
"""
if assoc_id is None:
self.assoc_id = self.make_association_id(
self.definedby, self.sub, self.rel, self.obj)
else:
self.assoc_id = assoc_id
return self.assoc_id
|
[
"def",
"set_association_id",
"(",
"self",
",",
"assoc_id",
"=",
"None",
")",
":",
"if",
"assoc_id",
"is",
"None",
":",
"self",
".",
"assoc_id",
"=",
"self",
".",
"make_association_id",
"(",
"self",
".",
"definedby",
",",
"self",
".",
"sub",
",",
"self",
".",
"rel",
",",
"self",
".",
"obj",
")",
"else",
":",
"self",
".",
"assoc_id",
"=",
"assoc_id",
"return",
"self",
".",
"assoc_id"
] |
This will set the association ID based on the internal parts
of the association.
To be used in cases where an external association identifier
should be used.
:param assoc_id:
:return:
|
[
"This",
"will",
"set",
"the",
"association",
"ID",
"based",
"on",
"the",
"internal",
"parts",
"of",
"the",
"association",
".",
"To",
"be",
"used",
"in",
"cases",
"where",
"an",
"external",
"association",
"identifier",
"should",
"be",
"used",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/Association.py#L166-L184
|
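A trimmed stand-in for the setter pattern: honour a caller-supplied external identifier, otherwise derive one from the association's own parts (the derivation itself appears with the next record). Field names follow the record; the plain join below is only a placeholder for make_association_id:

class Assoc:
    def __init__(self, definedby, sub, rel, obj):
        self.definedby, self.sub, self.rel, self.obj = definedby, sub, rel, obj
        self.assoc_id = None

    def set_association_id(self, assoc_id=None):
        if assoc_id is None:
            # no external id supplied: derive one from the internal parts
            self.assoc_id = '+'.join((self.definedby, self.sub, self.rel, self.obj))
        else:
            self.assoc_id = assoc_id
        return self.assoc_id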
18,361
|
monarch-initiative/dipper
|
dipper/models/assoc/Association.py
|
Assoc.make_association_id
|
def make_association_id(definedby, sub, pred, obj, attributes=None):
"""
A method to create unique identifiers for OBAN-style associations,
based on all the parts of the association
If any of the items is empty or None, it will convert it to blank.
        It effectively digests the string of concatenated values.
        Subclasses of Assoc can submit an additional array of attributes
        that will be appended to the ID.
        Note this is equivalent to an RDF blank node
:param definedby: The (data) resource that provided the annotation
:param subject:
:param predicate:
:param object:
:param attributes:
:return:
"""
items_to_hash = [definedby, sub, pred, obj]
if attributes is not None and len(attributes) > 0:
items_to_hash += attributes
items_to_hash = [x for x in items_to_hash if x is not None]
assoc_id = ':'.join(('MONARCH', GraphUtils.digest_id('+'.join(items_to_hash))))
assert assoc_id is not None
return assoc_id
|
python
|
def make_association_id(definedby, sub, pred, obj, attributes=None):
"""
A method to create unique identifiers for OBAN-style associations,
based on all the parts of the association
If any of the items is empty or None, it will convert it to blank.
        It effectively digests the string of concatenated values.
        Subclasses of Assoc can submit an additional array of attributes
        that will be appended to the ID.
        Note this is equivalent to an RDF blank node
:param definedby: The (data) resource that provided the annotation
:param subject:
:param predicate:
:param object:
:param attributes:
:return:
"""
items_to_hash = [definedby, sub, pred, obj]
if attributes is not None and len(attributes) > 0:
items_to_hash += attributes
items_to_hash = [x for x in items_to_hash if x is not None]
assoc_id = ':'.join(('MONARCH', GraphUtils.digest_id('+'.join(items_to_hash))))
assert assoc_id is not None
return assoc_id
|
[
"def",
"make_association_id",
"(",
"definedby",
",",
"sub",
",",
"pred",
",",
"obj",
",",
"attributes",
"=",
"None",
")",
":",
"items_to_hash",
"=",
"[",
"definedby",
",",
"sub",
",",
"pred",
",",
"obj",
"]",
"if",
"attributes",
"is",
"not",
"None",
"and",
"len",
"(",
"attributes",
")",
">",
"0",
":",
"items_to_hash",
"+=",
"attributes",
"items_to_hash",
"=",
"[",
"x",
"for",
"x",
"in",
"items_to_hash",
"if",
"x",
"is",
"not",
"None",
"]",
"assoc_id",
"=",
"':'",
".",
"join",
"(",
"(",
"'MONARCH'",
",",
"GraphUtils",
".",
"digest_id",
"(",
"'+'",
".",
"join",
"(",
"items_to_hash",
")",
")",
")",
")",
"assert",
"assoc_id",
"is",
"not",
"None",
"return",
"assoc_id"
] |
A method to create unique identifiers for OBAN-style associations,
based on all the parts of the association
If any of the items is empty or None, it will convert it to blank.
It effectively digests the string of concatenated values.
Subclasses of Assoc can submit an additional array of attributes
that will be appended to the ID.
Note this is equivalent to an RDF blank node
:param definedby: The (data) resource that provided the annotation
:param subject:
:param predicate:
:param object:
:param attributes:
:return:
|
[
"A",
"method",
"to",
"create",
"unique",
"identifiers",
"for",
"OBAN",
"-",
"style",
"associations",
"based",
"on",
"all",
"the",
"parts",
"of",
"the",
"association",
"If",
"any",
"of",
"the",
"items",
"is",
"empty",
"or",
"None",
"it",
"will",
"convert",
"it",
"to",
"blank",
".",
"It",
"effectively",
"digests",
"the",
"string",
"of",
"concatonated",
"values",
".",
"Subclasses",
"of",
"Assoc",
"can",
"submit",
"an",
"additional",
"array",
"of",
"attributes",
"that",
"will",
"be",
"appeded",
"to",
"the",
"ID",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/Association.py#L250-L279
|
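The ID scheme is deterministic: the same parts always produce the same identifier, which is what makes it behave like a blank node that is stable across runs. A self-contained sketch; the md5 digest is an assumption standing in for GraphUtils.digest_id, whose exact format the record does not show:

import hashlib

def make_assoc_id(definedby, sub, pred, obj, attributes=None):
    """Digest the association's parts (plus optional attributes) into an id."""
    items = [definedby, sub, pred, obj]
    if attributes:
        items += attributes
    items = [part for part in items if part is not None]  # Nones drop out
    digest = hashlib.md5('+'.join(items).encode('utf-8')).hexdigest()
    return ':'.join(('MONARCH', digest))

# deterministic: the same parts always hash to the same identifier
assert make_assoc_id('ZFIN', 's', 'p', 'o') == make_assoc_id('ZFIN', 's', 'p', 'o')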
18,362
|
monarch-initiative/dipper
|
dipper/utils/romanplus.py
|
toRoman
|
def toRoman(num):
"""convert integer to Roman numeral"""
if not 0 < num < 5000:
raise ValueError("number %n out of range (must be 1..4999)", num)
if int(num) != num:
raise TypeError("decimals %n can not be converted", num)
result = ""
for numeral, integer in romanNumeralMap:
while num >= integer:
result += numeral
num -= integer
return result
|
python
|
def toRoman(num):
"""convert integer to Roman numeral"""
if not 0 < num < 5000:
raise ValueError("number %n out of range (must be 1..4999)", num)
if int(num) != num:
raise TypeError("decimals %n can not be converted", num)
result = ""
for numeral, integer in romanNumeralMap:
while num >= integer:
result += numeral
num -= integer
return result
|
[
"def",
"toRoman",
"(",
"num",
")",
":",
"if",
"not",
"0",
"<",
"num",
"<",
"5000",
":",
"raise",
"ValueError",
"(",
"\"number %n out of range (must be 1..4999)\"",
",",
"num",
")",
"if",
"int",
"(",
"num",
")",
"!=",
"num",
":",
"raise",
"TypeError",
"(",
"\"decimals %n can not be converted\"",
",",
"num",
")",
"result",
"=",
"\"\"",
"for",
"numeral",
",",
"integer",
"in",
"romanNumeralMap",
":",
"while",
"num",
">=",
"integer",
":",
"result",
"+=",
"numeral",
"num",
"-=",
"integer",
"return",
"result"
] |
convert integer to Roman numeral
|
[
"convert",
"integer",
"to",
"Roman",
"numeral"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/romanplus.py#L38-L50
|
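toRoman depends on a module-level romanNumeralMap the record does not include; the greedy subtract-the-largest-numeral loop only works because that map is ordered largest first and contains the subtractive pairs (CM, CD, XC, ...). A runnable version with a conventional map filled in; note the record's error messages pass %n-style strings and the argument separately, so they are never actually interpolated, which this sketch fixes:

romanNumeralMap = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                   ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                   ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))

def to_roman(num):
    """convert integer to Roman numeral (1..4999)"""
    if not 0 < num < 5000:
        raise ValueError("number %d out of range (must be 1..4999)" % num)
    if int(num) != num:
        raise TypeError("decimals %r can not be converted" % num)
    result = ""
    for numeral, integer in romanNumeralMap:
        while num >= integer:  # greedy: take the largest numeral that still fits
            result += numeral
            num -= integer
    return result

assert to_roman(1998) == 'MCMXCVIII'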
18,363
|
monarch-initiative/dipper
|
dipper/utils/romanplus.py
|
fromRoman
|
def fromRoman(strng):
"""convert Roman numeral to integer"""
if not strng:
raise TypeError('Input can not be blank')
if not romanNumeralPattern.search(strng):
raise ValueError('Invalid Roman numeral: %s', strng)
result = 0
index = 0
for numeral, integer in romanNumeralMap:
while strng[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
|
python
|
def fromRoman(strng):
"""convert Roman numeral to integer"""
if not strng:
raise TypeError('Input can not be blank')
if not romanNumeralPattern.search(strng):
raise ValueError('Invalid Roman numeral: %s', strng)
result = 0
index = 0
for numeral, integer in romanNumeralMap:
while strng[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
|
[
"def",
"fromRoman",
"(",
"strng",
")",
":",
"if",
"not",
"strng",
":",
"raise",
"TypeError",
"(",
"'Input can not be blank'",
")",
"if",
"not",
"romanNumeralPattern",
".",
"search",
"(",
"strng",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid Roman numeral: %s'",
",",
"strng",
")",
"result",
"=",
"0",
"index",
"=",
"0",
"for",
"numeral",
",",
"integer",
"in",
"romanNumeralMap",
":",
"while",
"strng",
"[",
"index",
":",
"index",
"+",
"len",
"(",
"numeral",
")",
"]",
"==",
"numeral",
":",
"result",
"+=",
"integer",
"index",
"+=",
"len",
"(",
"numeral",
")",
"return",
"result"
] |
convert Roman numeral to integer
|
[
"convert",
"Roman",
"numeral",
"to",
"integer"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/romanplus.py#L70-L83
|
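fromRoman is the inverse greedy scan: walk the string left to right, consuming the largest numeral that matches at the cursor. The validation regex is module state the record omits, so the pattern below is an assumed equivalent, not the upstream one (the numeral map repeats from the previous sketch to keep this self-contained):

import re

romanNumeralMap = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                   ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                   ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))

# assumed validation pattern; the upstream romanNumeralPattern is not shown
romanNumeralPattern = re.compile(
    r'^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$')

def from_roman(strng):
    """convert Roman numeral to integer"""
    if not strng:
        raise TypeError('Input can not be blank')
    if not romanNumeralPattern.search(strng):
        raise ValueError('Invalid Roman numeral: %s' % strng)
    result = index = 0
    for numeral, integer in romanNumeralMap:
        while strng[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result

assert from_roman('MCMXCVIII') == 1998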
18,364
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_genotype_backgrounds
|
def _process_genotype_backgrounds(self, limit=None):
"""
This table provides a mapping of genotypes to background genotypes
Note that the background_id is also a genotype_id.
Makes these triples:
<ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id>
<ZFIN:background_id> a GENO:genomic_background
<ZFIN:background_id> in_taxon <taxon_id>
<taxon_id> a class
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing genotype backgrounds")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['backgrounds']['file']))
geno = Genotype(graph)
# Add the taxon as a class
taxon_id = self.globaltt['Danio rerio']
model.addClassToGraph(taxon_id, None)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
# Genotype_ID Genotype_Name Background Background_Name
(genotype_id, genotype_name, background_id, unused) = row
if self.test_mode and genotype_id not in self.test_ids['genotype']:
continue
genotype_id = 'ZFIN:' + genotype_id.strip()
background_id = 'ZFIN:' + background_id.strip()
# store this in the hash for later lookup
# when building fish genotypes
self.genotype_backgrounds[genotype_id] = background_id
# add the background into the graph,
# in case we haven't seen it before
geno.addGenomicBackground(background_id, None)
# hang the taxon from the background
geno.addTaxon(taxon_id, background_id)
# add the intrinsic genotype to the graph
# we DO NOT ADD THE LABEL here
# as it doesn't include the background
geno.addGenotype(genotype_id, None, self.globaltt['intrinsic_genotype'])
# Add background to the intrinsic genotype
geno.addGenomicBackgroundToGenotype(background_id, genotype_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with genotype backgrounds")
return
|
python
|
def _process_genotype_backgrounds(self, limit=None):
"""
This table provides a mapping of genotypes to background genotypes
Note that the background_id is also a genotype_id.
Makes these triples:
<ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id>
<ZFIN:background_id> a GENO:genomic_background
<ZFIN:background_id> in_taxon <taxon_id>
<taxon_id> a class
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing genotype backgrounds")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['backgrounds']['file']))
geno = Genotype(graph)
# Add the taxon as a class
taxon_id = self.globaltt['Danio rerio']
model.addClassToGraph(taxon_id, None)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
# Genotype_ID Genotype_Name Background Background_Name
(genotype_id, genotype_name, background_id, unused) = row
if self.test_mode and genotype_id not in self.test_ids['genotype']:
continue
genotype_id = 'ZFIN:' + genotype_id.strip()
background_id = 'ZFIN:' + background_id.strip()
# store this in the hash for later lookup
# when building fish genotypes
self.genotype_backgrounds[genotype_id] = background_id
# add the background into the graph,
# in case we haven't seen it before
geno.addGenomicBackground(background_id, None)
# hang the taxon from the background
geno.addTaxon(taxon_id, background_id)
# add the intrinsic genotype to the graph
# we DO NOT ADD THE LABEL here
# as it doesn't include the background
geno.addGenotype(genotype_id, None, self.globaltt['intrinsic_genotype'])
# Add background to the intrinsic genotype
geno.addGenomicBackgroundToGenotype(background_id, genotype_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with genotype backgrounds")
return
|
[
"def",
"_process_genotype_backgrounds",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"LOG",
".",
"info",
"(",
"\"Processing genotype backgrounds\"",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'backgrounds'",
"]",
"[",
"'file'",
"]",
")",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"# Add the taxon as a class",
"taxon_id",
"=",
"self",
".",
"globaltt",
"[",
"'Danio rerio'",
"]",
"model",
".",
"addClassToGraph",
"(",
"taxon_id",
",",
"None",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"# Genotype_ID \tGenotype_Name \tBackground \tBackground_Name",
"(",
"genotype_id",
",",
"genotype_name",
",",
"background_id",
",",
"unused",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"genotype_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'genotype'",
"]",
":",
"continue",
"genotype_id",
"=",
"'ZFIN:'",
"+",
"genotype_id",
".",
"strip",
"(",
")",
"background_id",
"=",
"'ZFIN:'",
"+",
"background_id",
".",
"strip",
"(",
")",
"# store this in the hash for later lookup",
"# when building fish genotypes",
"self",
".",
"genotype_backgrounds",
"[",
"genotype_id",
"]",
"=",
"background_id",
"# add the background into the graph,",
"# in case we haven't seen it before",
"geno",
".",
"addGenomicBackground",
"(",
"background_id",
",",
"None",
")",
"# hang the taxon from the background",
"geno",
".",
"addTaxon",
"(",
"taxon_id",
",",
"background_id",
")",
"# add the intrinsic genotype to the graph",
"# we DO NOT ADD THE LABEL here",
"# as it doesn't include the background",
"geno",
".",
"addGenotype",
"(",
"genotype_id",
",",
"None",
",",
"self",
".",
"globaltt",
"[",
"'intrinsic_genotype'",
"]",
")",
"# Add background to the intrinsic genotype",
"geno",
".",
"addGenomicBackgroundToGenotype",
"(",
"background_id",
",",
"genotype_id",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with genotype backgrounds\"",
")",
"return"
] |
This table provides a mapping of genotypes to background genotypes
Note that the background_id is also a genotype_id.
Makes these triples:
<ZFIN:genotype_id> GENO:has_reference_part <ZFIN:background_id>
<ZFIN:background_id> a GENO:genomic_background
<ZFIN:background_id> in_taxon <taxon_id>
<taxon_id> a class
:param limit:
:return:
|
[
"This",
"table",
"provides",
"a",
"mapping",
"of",
"genotypes",
"to",
"background",
"genotypes",
"Note",
"that",
"the",
"background_id",
"is",
"also",
"a",
"genotype_id",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1048-L1113
|
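Every ZFIN parser in this file repeats the same scaffold: open an iso-8859-1 (or latin-1) tab-separated dump, iterate rows with a counter, and honour an optional row limit. A generator sketch of that shared loop; behaviour is approximate, since the record checks the limit after processing while this stops at exactly limit rows:

import csv

def stream_rows(path, limit=None, encoding='iso-8859-1'):
    """Yield rows from a tab-separated ZFIN dump, up to an optional limit."""
    with open(path, 'r', encoding=encoding) as tsvfile:
        reader = csv.reader(tsvfile, delimiter='\t', quotechar='"')
        for line_counter, row in enumerate(reader, start=1):
            yield row
            if limit is not None and line_counter >= limit:
                break

# hypothetical usage over the backgrounds dump:
# for genotype_id, genotype_name, background_id, _name in stream_rows(raw, limit=100):
#     ...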
18,365
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_stages
|
def _process_stages(self, limit=None):
"""
This table provides mappings between ZFIN stage IDs and ZFS terms,
and includes the starting and ending hours for the developmental stage.
Currently only processing the mapping from the ZFIN stage ID
to the ZFS ID.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing stages")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['stage']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(stage_id, stage_obo_id, stage_name, begin_hours, end_hours
# ,empty # till next time
) = row
                # Add the stage as a class, and its obo equivalent
stage_id = 'ZFIN:' + stage_id.strip()
model.addClassToGraph(stage_id, stage_name)
model.addEquivalentClass(stage_id, stage_obo_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with stages")
return
|
python
|
def _process_stages(self, limit=None):
"""
This table provides mappings between ZFIN stage IDs and ZFS terms,
and includes the starting and ending hours for the developmental stage.
Currently only processing the mapping from the ZFIN stage ID
to the ZFS ID.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing stages")
line_counter = 0
raw = '/'.join((self.rawdir, self.files['stage']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(stage_id, stage_obo_id, stage_name, begin_hours, end_hours
# ,empty # till next time
) = row
                # Add the stage as a class, and its obo equivalent
stage_id = 'ZFIN:' + stage_id.strip()
model.addClassToGraph(stage_id, stage_name)
model.addEquivalentClass(stage_id, stage_obo_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with stages")
return
|
[
"def",
"_process_stages",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"LOG",
".",
"info",
"(",
"\"Processing stages\"",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'stage'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"stage_id",
",",
"stage_obo_id",
",",
"stage_name",
",",
"begin_hours",
",",
"end_hours",
"# ,empty # till next time",
")",
"=",
"row",
"# Add the stage as a class, and it's obo equivalent",
"stage_id",
"=",
"'ZFIN:'",
"+",
"stage_id",
".",
"strip",
"(",
")",
"model",
".",
"addClassToGraph",
"(",
"stage_id",
",",
"stage_name",
")",
"model",
".",
"addEquivalentClass",
"(",
"stage_id",
",",
"stage_obo_id",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with stages\"",
")",
"return"
] |
This table provides mappings between ZFIN stage IDs and ZFS terms,
and includes the starting and ending hours for the developmental stage.
Currently only processing the mapping from the ZFIN stage ID
to the ZFS ID.
:param limit:
:return:
|
[
"This",
"table",
"provides",
"mappings",
"between",
"ZFIN",
"stage",
"IDs",
"and",
"ZFS",
"terms",
"and",
"includes",
"the",
"starting",
"and",
"ending",
"hours",
"for",
"the",
"developmental",
"stage",
".",
"Currently",
"only",
"processing",
"the",
"mapping",
"from",
"the",
"ZFIN",
"stage",
"ID",
"to",
"the",
"ZFS",
"ID",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1174-L1211
|
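Seen as RDF, the two model calls per row say "labelled class, equivalent to its ZFS term". A hedged rdflib rendering of that step; the namespaces and the sample stage identifiers are illustrative assumptions, not dipper's actual URI scheme or real ZFIN/ZFS ids:

from rdflib import Graph, Literal, Namespace
from rdflib.namespace import OWL, RDFS

ZFIN = Namespace('http://zfin.org/')                # illustrative namespace
OBO = Namespace('http://purl.obolibrary.org/obo/')  # illustrative namespace

def add_stage(graph, stage_num, stage_obo_id, stage_name):
    """Declare a ZFIN stage as a labelled class equivalent to its ZFS term."""
    stage = ZFIN[stage_num]
    graph.add((stage, RDFS.label, Literal(stage_name)))
    graph.add((stage, OWL.equivalentClass, OBO[stage_obo_id]))
    return graph

graph = add_stage(Graph(), 'ZDB-STAGE-000000-1', 'ZFS_0000001', 'example stage')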
18,366
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_genes
|
def _process_genes(self, limit=None):
"""
This table provides the ZFIN gene id, the SO type of the gene,
the gene symbol, and the NCBI Gene ID.
Triples created:
<gene id> a class
<gene id> rdfs:label gene_symbol
<gene id> equivalent class <ncbi_gene_id>
:param limit:
:return:
"""
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['gene']['file']))
geno = Genotype(graph)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, ncbi_gene_id
# , empty # till next time
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip()
self.id_label_map[gene_id] = gene_symbol
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
geno.addGene(gene_id, gene_symbol)
model.addEquivalentClass(gene_id, ncbi_gene_id)
LOG.info("Done with genes")
return
|
python
|
def _process_genes(self, limit=None):
"""
This table provides the ZFIN gene id, the SO type of the gene,
the gene symbol, and the NCBI Gene ID.
Triples created:
<gene id> a class
<gene id> rdfs:label gene_symbol
<gene id> equivalent class <ncbi_gene_id>
:param limit:
:return:
"""
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['gene']['file']))
geno = Genotype(graph)
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, ncbi_gene_id
# , empty # till next time
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip()
self.id_label_map[gene_id] = gene_symbol
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
geno.addGene(gene_id, gene_symbol)
model.addEquivalentClass(gene_id, ncbi_gene_id)
LOG.info("Done with genes")
return
|
[
"def",
"_process_genes",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing genes\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'gene'",
"]",
"[",
"'file'",
"]",
")",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"gene_id",
",",
"gene_so_id",
",",
"gene_symbol",
",",
"ncbi_gene_id",
"# , empty # till next time",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"gene_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'gene'",
"]",
":",
"continue",
"gene_id",
"=",
"'ZFIN:'",
"+",
"gene_id",
".",
"strip",
"(",
")",
"ncbi_gene_id",
"=",
"'NCBIGene:'",
"+",
"ncbi_gene_id",
".",
"strip",
"(",
")",
"self",
".",
"id_label_map",
"[",
"gene_id",
"]",
"=",
"gene_symbol",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"pass",
"else",
":",
"geno",
".",
"addGene",
"(",
"gene_id",
",",
"gene_symbol",
")",
"model",
".",
"addEquivalentClass",
"(",
"gene_id",
",",
"ncbi_gene_id",
")",
"LOG",
".",
"info",
"(",
"\"Done with genes\"",
")",
"return"
] |
This table provides the ZFIN gene id, the SO type of the gene,
the gene symbol, and the NCBI Gene ID.
Triples created:
<gene id> a class
<gene id> rdfs:label gene_symbol
<gene id> equivalent class <ncbi_gene_id>
:param limit:
:return:
|
[
"This",
"table",
"provides",
"the",
"ZFIN",
"gene",
"id",
"the",
"SO",
"type",
"of",
"the",
"gene",
"the",
"gene",
"symbol",
"and",
"the",
"NCBI",
"Gene",
"ID",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1390-L1437
|
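Two details in _process_genes are worth noting: the repeated CURIE normalisation ('ZFIN:' / 'NCBIGene:' plus strip()), and the limit handling, which uses pass/else rather than break, so past the limit the parser keeps reading rows and filling id_label_map while only suppressing triple output. The normalisation as a tiny helper (identifiers in the asserts are hypothetical):

def to_curie(prefix, local_id):
    """Mirror the record's 'ZFIN:' + gene_id.strip() style normalisation."""
    return prefix + ':' + local_id.strip()

assert to_curie('ZFIN', ' ZDB-GENE-000000-1 ') == 'ZFIN:ZDB-GENE-000000-1'
assert to_curie('NCBIGene', '12345') == 'NCBIGene:12345'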
18,367
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_features
|
def _process_features(self, limit=None):
"""
This module provides information for the intrinsic
and extrinsic genotype features of zebrafish.
All items here are 'alterations', and are therefore instances.
sequence alteration ID, SO type, abbreviation, and relationship to
the affected gene, with the gene's ID, symbol,
and SO type (gene/pseudogene).
Triples created:
<gene id> a class:
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing features")
line_counter = 0
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['features']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(genomic_feature_id, feature_so_id,
genomic_feature_abbreviation, genomic_feature_name,
genomic_feature_type, mutagen, mutagee, construct_id,
construct_name, construct_so_id, talen_crispr_id,
talen_crispr_nam
# , empty
) = row
if self.test_mode and (
genomic_feature_id not in self.test_ids['allele']):
continue
genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip()
model.addIndividualToGraph(
genomic_feature_id, genomic_feature_name, feature_so_id)
model.addSynonym(
genomic_feature_id, genomic_feature_abbreviation)
if construct_id is not None and construct_id != '':
construct_id = 'ZFIN:' + construct_id.strip()
geno.addConstruct(
construct_id, construct_name, construct_so_id)
geno.addSequenceDerivesFrom(
genomic_feature_id, construct_id)
# Note, we don't really care about how the variant was derived.
# so we skip that.
# add to the id-label map
self.id_label_map[
genomic_feature_id] = genomic_feature_abbreviation
self.id_label_map[construct_id] = construct_name
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with features")
return
|
python
|
def _process_features(self, limit=None):
"""
This module provides information for the intrinsic
and extrinsic genotype features of zebrafish.
All items here are 'alterations', and are therefore instances.
sequence alteration ID, SO type, abbreviation, and relationship to
the affected gene, with the gene's ID, symbol,
and SO type (gene/pseudogene).
Triples created:
<gene id> a class:
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Processing features")
line_counter = 0
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['features']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(genomic_feature_id, feature_so_id,
genomic_feature_abbreviation, genomic_feature_name,
genomic_feature_type, mutagen, mutagee, construct_id,
construct_name, construct_so_id, talen_crispr_id,
talen_crispr_nam
# , empty
) = row
if self.test_mode and (
genomic_feature_id not in self.test_ids['allele']):
continue
genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip()
model.addIndividualToGraph(
genomic_feature_id, genomic_feature_name, feature_so_id)
model.addSynonym(
genomic_feature_id, genomic_feature_abbreviation)
if construct_id is not None and construct_id != '':
construct_id = 'ZFIN:' + construct_id.strip()
geno.addConstruct(
construct_id, construct_name, construct_so_id)
geno.addSequenceDerivesFrom(
genomic_feature_id, construct_id)
# Note, we don't really care about how the variant was derived.
# so we skip that.
# add to the id-label map
self.id_label_map[
genomic_feature_id] = genomic_feature_abbreviation
self.id_label_map[construct_id] = construct_name
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with features")
return
|
[
"def",
"_process_features",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"LOG",
".",
"info",
"(",
"\"Processing features\"",
")",
"line_counter",
"=",
"0",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'features'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"genomic_feature_id",
",",
"feature_so_id",
",",
"genomic_feature_abbreviation",
",",
"genomic_feature_name",
",",
"genomic_feature_type",
",",
"mutagen",
",",
"mutagee",
",",
"construct_id",
",",
"construct_name",
",",
"construct_so_id",
",",
"talen_crispr_id",
",",
"talen_crispr_nam",
"# , empty",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"(",
"genomic_feature_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'allele'",
"]",
")",
":",
"continue",
"genomic_feature_id",
"=",
"'ZFIN:'",
"+",
"genomic_feature_id",
".",
"strip",
"(",
")",
"model",
".",
"addIndividualToGraph",
"(",
"genomic_feature_id",
",",
"genomic_feature_name",
",",
"feature_so_id",
")",
"model",
".",
"addSynonym",
"(",
"genomic_feature_id",
",",
"genomic_feature_abbreviation",
")",
"if",
"construct_id",
"is",
"not",
"None",
"and",
"construct_id",
"!=",
"''",
":",
"construct_id",
"=",
"'ZFIN:'",
"+",
"construct_id",
".",
"strip",
"(",
")",
"geno",
".",
"addConstruct",
"(",
"construct_id",
",",
"construct_name",
",",
"construct_so_id",
")",
"geno",
".",
"addSequenceDerivesFrom",
"(",
"genomic_feature_id",
",",
"construct_id",
")",
"# Note, we don't really care about how the variant was derived.",
"# so we skip that.",
"# add to the id-label map",
"self",
".",
"id_label_map",
"[",
"genomic_feature_id",
"]",
"=",
"genomic_feature_abbreviation",
"self",
".",
"id_label_map",
"[",
"construct_id",
"]",
"=",
"construct_name",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with features\"",
")",
"return"
] |
This module provides information for the intrinsic
and extrinsic genotype features of zebrafish.
All items here are 'alterations', and are therefore instances.
sequence alteration ID, SO type, abbreviation, and relationship to
the affected gene, with the gene's ID, symbol,
and SO type (gene/pseudogene).
Triples created:
<gene id> a class:
:param limit:
:return:
|
[
"This",
"module",
"provides",
"information",
"for",
"the",
"intrinsic",
"and",
"extrinsic",
"genotype",
"features",
"of",
"zebrafish",
".",
"All",
"items",
"here",
"are",
"alterations",
"and",
"are",
"therefore",
"instances",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1439-L1505
|
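One subtlety in the loop above: self.id_label_map[construct_id] = construct_name sits outside the construct guard, so rows without a construct quietly map the empty string to an empty name. A guarded sketch of just the bookkeeping; field names follow the record, and rows are assumed to have at least nine columns:

def record_feature(row, id_label_map):
    """Track feature and construct labels, skipping empty construct ids."""
    (genomic_feature_id, _feature_so_id, abbreviation, _name, _ftype,
     _mutagen, _mutagee, construct_id, construct_name, *_rest) = row
    genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip()
    id_label_map[genomic_feature_id] = abbreviation
    if construct_id:  # guard: the record writes this entry even when empty
        construct_id = 'ZFIN:' + construct_id.strip()
        id_label_map[construct_id] = construct_name
    return id_label_map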
18,368
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_pubinfo
|
def _process_pubinfo(self, limit=None):
"""
This will pull the zfin internal publication information,
        and map them to their equivalent pmids, and make labels.
Triples created:
<pub_id> is an individual
<pub_id> rdfs:label <pub_label>
<pubmed_id> is an individual
<pubmed_id> rdfs:label <pub_label>
<pub_id> sameIndividual <pubmed_id>
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['pubs']['file']))
with open(raw, 'r', encoding="latin-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
try:
(pub_id, pubmed_id, authors, title,
journal, year, vol, pages) = row
except ValueError:
try:
(pub_id, pubmed_id, authors, title,
journal, year, vol, pages
# , empty
) = row
except ValueError:
LOG.warning("Error parsing row %s: ", row)
if self.test_mode and (
'ZFIN:' + pub_id not in self.test_ids['pub'] and
'PMID:' + pubmed_id not in self.test_ids['pub']):
continue
pub_id = 'ZFIN:' + pub_id.strip()
# trim the author list for ease of reading
alist = re.split(r',', authors)
if len(alist) > 1:
astring = ' '.join((alist[0].strip(), 'et al'))
else:
astring = authors
pub_label = '; '.join((astring, title, journal, year, vol, pages))
ref = Reference(graph, pub_id)
ref.setShortCitation(pub_label)
ref.setYear(year)
ref.setTitle(title)
if pubmed_id is not None and pubmed_id != '':
# let's make an assumption that if there's a pubmed id,
# that it is a journal article
ref.setType(self.globaltt['journal article'])
pubmed_id = 'PMID:' + pubmed_id.strip()
rpm = Reference(graph, pubmed_id, self.globaltt['journal article'])
rpm.addRefToGraph()
model.addSameIndividual(pub_id, pubmed_id)
model.makeLeader(pubmed_id)
ref.addRefToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
python
|
def _process_pubinfo(self, limit=None):
"""
This will pull the zfin internal publication information,
        and map them to their equivalent pmids, and make labels.
Triples created:
<pub_id> is an individual
<pub_id> rdfs:label <pub_label>
<pubmed_id> is an individual
<pubmed_id> rdfs:label <pub_label>
<pub_id> sameIndividual <pubmed_id>
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['pubs']['file']))
with open(raw, 'r', encoding="latin-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
try:
(pub_id, pubmed_id, authors, title,
journal, year, vol, pages) = row
except ValueError:
try:
(pub_id, pubmed_id, authors, title,
journal, year, vol, pages
# , empty
) = row
except ValueError:
LOG.warning("Error parsing row %s: ", row)
if self.test_mode and (
'ZFIN:' + pub_id not in self.test_ids['pub'] and
'PMID:' + pubmed_id not in self.test_ids['pub']):
continue
pub_id = 'ZFIN:' + pub_id.strip()
# trim the author list for ease of reading
alist = re.split(r',', authors)
if len(alist) > 1:
astring = ' '.join((alist[0].strip(), 'et al'))
else:
astring = authors
pub_label = '; '.join((astring, title, journal, year, vol, pages))
ref = Reference(graph, pub_id)
ref.setShortCitation(pub_label)
ref.setYear(year)
ref.setTitle(title)
if pubmed_id is not None and pubmed_id != '':
# let's make an assumption that if there's a pubmed id,
# that it is a journal article
ref.setType(self.globaltt['journal article'])
pubmed_id = 'PMID:' + pubmed_id.strip()
rpm = Reference(graph, pubmed_id, self.globaltt['journal article'])
rpm.addRefToGraph()
model.addSameIndividual(pub_id, pubmed_id)
model.makeLeader(pubmed_id)
ref.addRefToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
[
"def",
"_process_pubinfo",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"line_counter",
"=",
"0",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'pubs'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"latin-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"try",
":",
"(",
"pub_id",
",",
"pubmed_id",
",",
"authors",
",",
"title",
",",
"journal",
",",
"year",
",",
"vol",
",",
"pages",
")",
"=",
"row",
"except",
"ValueError",
":",
"try",
":",
"(",
"pub_id",
",",
"pubmed_id",
",",
"authors",
",",
"title",
",",
"journal",
",",
"year",
",",
"vol",
",",
"pages",
"# , empty",
")",
"=",
"row",
"except",
"ValueError",
":",
"LOG",
".",
"warning",
"(",
"\"Error parsing row %s: \"",
",",
"row",
")",
"if",
"self",
".",
"test_mode",
"and",
"(",
"'ZFIN:'",
"+",
"pub_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'pub'",
"]",
"and",
"'PMID:'",
"+",
"pubmed_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'pub'",
"]",
")",
":",
"continue",
"pub_id",
"=",
"'ZFIN:'",
"+",
"pub_id",
".",
"strip",
"(",
")",
"# trim the author list for ease of reading",
"alist",
"=",
"re",
".",
"split",
"(",
"r','",
",",
"authors",
")",
"if",
"len",
"(",
"alist",
")",
">",
"1",
":",
"astring",
"=",
"' '",
".",
"join",
"(",
"(",
"alist",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'et al'",
")",
")",
"else",
":",
"astring",
"=",
"authors",
"pub_label",
"=",
"'; '",
".",
"join",
"(",
"(",
"astring",
",",
"title",
",",
"journal",
",",
"year",
",",
"vol",
",",
"pages",
")",
")",
"ref",
"=",
"Reference",
"(",
"graph",
",",
"pub_id",
")",
"ref",
".",
"setShortCitation",
"(",
"pub_label",
")",
"ref",
".",
"setYear",
"(",
"year",
")",
"ref",
".",
"setTitle",
"(",
"title",
")",
"if",
"pubmed_id",
"is",
"not",
"None",
"and",
"pubmed_id",
"!=",
"''",
":",
"# let's make an assumption that if there's a pubmed id,",
"# that it is a journal article",
"ref",
".",
"setType",
"(",
"self",
".",
"globaltt",
"[",
"'journal article'",
"]",
")",
"pubmed_id",
"=",
"'PMID:'",
"+",
"pubmed_id",
".",
"strip",
"(",
")",
"rpm",
"=",
"Reference",
"(",
"graph",
",",
"pubmed_id",
",",
"self",
".",
"globaltt",
"[",
"'journal article'",
"]",
")",
"rpm",
".",
"addRefToGraph",
"(",
")",
"model",
".",
"addSameIndividual",
"(",
"pub_id",
",",
"pubmed_id",
")",
"model",
".",
"makeLeader",
"(",
"pubmed_id",
")",
"ref",
".",
"addRefToGraph",
"(",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"return"
] |
This will pull the zfin internal publication information,
and map them to their equivalent pmids, and make labels.
Triples created:
<pub_id> is an individual
<pub_id> rdfs:label <pub_label>
<pubmed_id> is an individual
<pubmed_id> rdfs:label <pub_label>
<pub_id> sameIndividual <pubmed_id>
:param limit:
:return:
|
[
"This",
"will",
"pull",
"the",
"zfin",
"internal",
"publication",
"information",
"and",
"map",
"them",
"to",
"their",
"equivalent",
"pmid",
"and",
"make",
"labels",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1775-L1851
|
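The row-unpacking here is fragile: the fallback tuple in the inner try unpacks the same eight fields as the first attempt, and when both fail the warning leaves the row variables unbound for the rest of the iteration. A defensive sketch that pads short rows and truncates long ones instead (the helper name and padding policy are assumptions, not dipper's fix):

def parse_pub_row(row):
    """Unpack a ZFIN pubs row, tolerating missing or extra trailing fields."""
    fields = (list(row) + [''] * 8)[:8]  # pad short rows, drop extras
    pub_id, pubmed_id, authors, title, journal, year, vol, pages = fields
    author_list = authors.split(',')
    if len(author_list) > 1:
        astring = author_list[0].strip() + ' et al'  # trim for readability
    else:
        astring = authors
    pub_label = '; '.join((astring, title, journal, year, vol, pages))
    return pub_id.strip(), pubmed_id.strip(), pub_label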
18,369
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_pub2pubmed
|
def _process_pub2pubmed(self, limit=None):
"""
This will pull the zfin internal publication to pubmed mappings.
Somewhat redundant with the process_pubinfo method,
but this includes additional mappings.
<pub_id> is an individual
<pub_id> rdfs:label <pub_label>
<pubmed_id> is an individual
<pubmed_id> rdfs:label <pub_label>
<pub_id> sameIndividual <pubmed_id>
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['pub2pubmed']['file']))
with open(raw, 'r', encoding="latin-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(pub_id, pubmed_id
# , empty
) = row
if self.test_mode and (
'ZFIN:' + pub_id not in self.test_ids['pub'] and
'PMID:' + pubmed_id not in self.test_ids['pub']):
continue
pub_id = 'ZFIN:' + pub_id.strip()
rtype = None
if pubmed_id != '' and pubmed_id is not None:
pubmed_id = 'PMID:' + pubmed_id.strip()
rtype = self.globaltt['journal article']
rpm = Reference(graph, pubmed_id, rtype)
rpm.addRefToGraph()
model.addSameIndividual(pub_id, pubmed_id)
ref = Reference(graph, pub_id, rtype)
ref.addRefToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
python
|
def _process_pub2pubmed(self, limit=None):
"""
This will pull the zfin internal publication to pubmed mappings.
Somewhat redundant with the process_pubinfo method,
but this includes additional mappings.
<pub_id> is an individual
<pub_id> rdfs:label <pub_label>
<pubmed_id> is an individual
<pubmed_id> rdfs:label <pub_label>
<pub_id> sameIndividual <pubmed_id>
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['pub2pubmed']['file']))
with open(raw, 'r', encoding="latin-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(pub_id, pubmed_id
# , empty
) = row
if self.test_mode and (
'ZFIN:' + pub_id not in self.test_ids['pub'] and
'PMID:' + pubmed_id not in self.test_ids['pub']):
continue
pub_id = 'ZFIN:' + pub_id.strip()
rtype = None
if pubmed_id != '' and pubmed_id is not None:
pubmed_id = 'PMID:' + pubmed_id.strip()
rtype = self.globaltt['journal article']
rpm = Reference(graph, pubmed_id, rtype)
rpm.addRefToGraph()
model.addSameIndividual(pub_id, pubmed_id)
ref = Reference(graph, pub_id, rtype)
ref.addRefToGraph()
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
[
"def",
"_process_pub2pubmed",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"line_counter",
"=",
"0",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'pub2pubmed'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"latin-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"pub_id",
",",
"pubmed_id",
"# , empty",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"(",
"'ZFIN:'",
"+",
"pub_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'pub'",
"]",
"and",
"'PMID:'",
"+",
"pubmed_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'pub'",
"]",
")",
":",
"continue",
"pub_id",
"=",
"'ZFIN:'",
"+",
"pub_id",
".",
"strip",
"(",
")",
"rtype",
"=",
"None",
"if",
"pubmed_id",
"!=",
"''",
"and",
"pubmed_id",
"is",
"not",
"None",
":",
"pubmed_id",
"=",
"'PMID:'",
"+",
"pubmed_id",
".",
"strip",
"(",
")",
"rtype",
"=",
"self",
".",
"globaltt",
"[",
"'journal article'",
"]",
"rpm",
"=",
"Reference",
"(",
"graph",
",",
"pubmed_id",
",",
"rtype",
")",
"rpm",
".",
"addRefToGraph",
"(",
")",
"model",
".",
"addSameIndividual",
"(",
"pub_id",
",",
"pubmed_id",
")",
"ref",
"=",
"Reference",
"(",
"graph",
",",
"pub_id",
",",
"rtype",
")",
"ref",
".",
"addRefToGraph",
"(",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"return"
] |
This will pull the zfin internal publication to pubmed mappings.
Somewhat redundant with the process_pubinfo method,
but this includes additional mappings.
<pub_id> is an individual
<pub_id> rdfs:label <pub_label>
<pubmed_id> is an individual
<pubmed_id> rdfs:label <pub_label>
<pub_id> sameIndividual <pubmed_id>
:param limit:
:return:
|
[
"This",
"will",
"pull",
"the",
"zfin",
"internal",
"publication",
"to",
"pubmed",
"mappings",
".",
"Somewhat",
"redundant",
"with",
"the",
"process_pubinfo",
"method",
"but",
"this",
"includes",
"additional",
"mappings",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1853-L1901
|
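Stripped of the graph plumbing, this method pairs each internal ZFIN publication with its PMID when one exists. A generator sketch of that core, tolerating rows that omit the PMID column:

import csv

def load_pub_mappings(path):
    """Yield (ZFIN pub CURIE, PMID CURIE or None) pairs from the dump."""
    with open(path, 'r', encoding='latin-1') as tsvfile:
        for row in csv.reader(tsvfile, delimiter='\t'):
            pub_id = row[0]
            pubmed_id = row[1] if len(row) > 1 else ''
            pub_curie = 'ZFIN:' + pub_id.strip()
            pm_curie = 'PMID:' + pubmed_id.strip() if pubmed_id.strip() else None
            yield pub_curie, pm_curie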
18,370
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_targeting_reagents
|
def _process_targeting_reagents(self, reagent_type, limit=None):
"""
This method processes the gene targeting knockdown reagents,
such as morpholinos, talens, and crisprs.
We create triples for the reagents and pass the data into a hash map
for use in the pheno_enviro method.
        Morpholinos work similarly to RNAi.
TALENs are artificial restriction enzymes
that can be used for genome editing in situ.
        CRISPRs are knockdown reagents, working similarly to RNAi
        but at the transcriptional level instead of the mRNA level.
You can read more about TALEN and CRISPR techniques in review
[Gaj et al]
http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5
TODO add sequences
Triples created:
<reagent_id> is a gene_targeting_reagent
<reagent_id> rdfs:label <reagent_symbol>
<reagent_id> has type <reagent_so_id>
<reagent_id> has comment <note>
<publication_id> is an individual
<publication_id> mentions <morpholino_id>
:param reagent_type: should be one of: morph, talen, crispr
:param limit:
:return:
"""
LOG.info("Processing Gene Targeting Reagents")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
if reagent_type not in ['morph', 'talen', 'crispr']:
LOG.error("You didn't specify the right kind of file type.")
return
raw = '/'.join((self.rawdir, self.files[reagent_type]['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
if reagent_type in ['morph', 'crispr']:
try:
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
publication, note) = row
except ValueError:
# Catch lines without publication or note
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
publication) = row
elif reagent_type == 'talen':
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
reagent_sequence2, publication, note) = row
else:
# should not get here
return
reagent_id = 'ZFIN:' + reagent_num.strip()
gene_id = 'ZFIN:' + gene_num.strip()
self.id_label_map[reagent_id] = reagent_symbol
if self.test_mode and (
reagent_num not in self.test_ids['morpholino'] and
gene_num not in self.test_ids['gene']):
continue
geno.addGeneTargetingReagent(reagent_id, reagent_symbol,
reagent_so_id, gene_id)
# The reagent targeted gene is added
# in the pheno_environment processing function.
# Add publication
# note that the publications can be comma-delimited,
# like: ZDB-PUB-100719-4,ZDB-PUB-130703-22
if publication != '':
pubs = re.split(r',', publication.strip())
for pub in pubs:
pub_id = 'ZFIN:' + pub.strip()
ref = Reference(graph, pub_id)
ref.addRefToGraph()
graph.addTriple(pub_id, self.globaltt['mentions'], reagent_id)
# Add comment?
if note != '':
model.addComment(reagent_id, note)
# use the variant hash for reagents to list the affected genes
if reagent_id not in self.variant_loci_genes:
self.variant_loci_genes[reagent_id] = [gene_id]
else:
if gene_id not in self.variant_loci_genes[reagent_id]:
self.variant_loci_genes[reagent_id] += [gene_id]
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with Reagent type %s", reagent_type)
return
|
python
|
def _process_targeting_reagents(self, reagent_type, limit=None):
"""
This method processes the gene targeting knockdown reagents,
such as morpholinos, talens, and crisprs.
We create triples for the reagents and pass the data into a hash map
for use in the pheno_enviro method.
        Morpholinos work similarly to RNAi.
TALENs are artificial restriction enzymes
that can be used for genome editing in situ.
        CRISPRs are knockdown reagents, working similarly to RNAi
        but at the transcriptional level instead of the mRNA level.
You can read more about TALEN and CRISPR techniques in review
[Gaj et al]
http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5
TODO add sequences
Triples created:
<reagent_id> is a gene_targeting_reagent
<reagent_id> rdfs:label <reagent_symbol>
<reagent_id> has type <reagent_so_id>
<reagent_id> has comment <note>
<publication_id> is an individual
<publication_id> mentions <morpholino_id>
:param reagent_type: should be one of: morph, talen, crispr
:param limit:
:return:
"""
LOG.info("Processing Gene Targeting Reagents")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
if reagent_type not in ['morph', 'talen', 'crispr']:
LOG.error("You didn't specify the right kind of file type.")
return
raw = '/'.join((self.rawdir, self.files[reagent_type]['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
if reagent_type in ['morph', 'crispr']:
try:
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
publication, note) = row
except ValueError:
# Catch lines without publication or note
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
publication) = row
elif reagent_type == 'talen':
(gene_num, gene_so_id, gene_symbol, reagent_num,
reagent_so_id, reagent_symbol, reagent_sequence,
reagent_sequence2, publication, note) = row
else:
# should not get here
return
reagent_id = 'ZFIN:' + reagent_num.strip()
gene_id = 'ZFIN:' + gene_num.strip()
self.id_label_map[reagent_id] = reagent_symbol
if self.test_mode and (
reagent_num not in self.test_ids['morpholino'] and
gene_num not in self.test_ids['gene']):
continue
geno.addGeneTargetingReagent(reagent_id, reagent_symbol,
reagent_so_id, gene_id)
# The reagent targeted gene is added
# in the pheno_environment processing function.
# Add publication
# note that the publications can be comma-delimited,
# like: ZDB-PUB-100719-4,ZDB-PUB-130703-22
if publication != '':
pubs = re.split(r',', publication.strip())
for pub in pubs:
pub_id = 'ZFIN:' + pub.strip()
ref = Reference(graph, pub_id)
ref.addRefToGraph()
graph.addTriple(pub_id, self.globaltt['mentions'], reagent_id)
# Add comment?
if note != '':
model.addComment(reagent_id, note)
# use the variant hash for reagents to list the affected genes
if reagent_id not in self.variant_loci_genes:
self.variant_loci_genes[reagent_id] = [gene_id]
else:
if gene_id not in self.variant_loci_genes[reagent_id]:
self.variant_loci_genes[reagent_id] += [gene_id]
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with Reagent type %s", reagent_type)
return
|
[
"def",
"_process_targeting_reagents",
"(",
"self",
",",
"reagent_type",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing Gene Targeting Reagents\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"line_counter",
"=",
"0",
"model",
"=",
"Model",
"(",
"graph",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"if",
"reagent_type",
"not",
"in",
"[",
"'morph'",
",",
"'talen'",
",",
"'crispr'",
"]",
":",
"LOG",
".",
"error",
"(",
"\"You didn't specify the right kind of file type.\"",
")",
"return",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"reagent_type",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"if",
"reagent_type",
"in",
"[",
"'morph'",
",",
"'crispr'",
"]",
":",
"try",
":",
"(",
"gene_num",
",",
"gene_so_id",
",",
"gene_symbol",
",",
"reagent_num",
",",
"reagent_so_id",
",",
"reagent_symbol",
",",
"reagent_sequence",
",",
"publication",
",",
"note",
")",
"=",
"row",
"except",
"ValueError",
":",
"# Catch lines without publication or note",
"(",
"gene_num",
",",
"gene_so_id",
",",
"gene_symbol",
",",
"reagent_num",
",",
"reagent_so_id",
",",
"reagent_symbol",
",",
"reagent_sequence",
",",
"publication",
")",
"=",
"row",
"elif",
"reagent_type",
"==",
"'talen'",
":",
"(",
"gene_num",
",",
"gene_so_id",
",",
"gene_symbol",
",",
"reagent_num",
",",
"reagent_so_id",
",",
"reagent_symbol",
",",
"reagent_sequence",
",",
"reagent_sequence2",
",",
"publication",
",",
"note",
")",
"=",
"row",
"else",
":",
"# should not get here",
"return",
"reagent_id",
"=",
"'ZFIN:'",
"+",
"reagent_num",
".",
"strip",
"(",
")",
"gene_id",
"=",
"'ZFIN:'",
"+",
"gene_num",
".",
"strip",
"(",
")",
"self",
".",
"id_label_map",
"[",
"reagent_id",
"]",
"=",
"reagent_symbol",
"if",
"self",
".",
"test_mode",
"and",
"(",
"reagent_num",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'morpholino'",
"]",
"and",
"gene_num",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'gene'",
"]",
")",
":",
"continue",
"geno",
".",
"addGeneTargetingReagent",
"(",
"reagent_id",
",",
"reagent_symbol",
",",
"reagent_so_id",
",",
"gene_id",
")",
"# The reagent targeted gene is added",
"# in the pheno_environment processing function.",
"# Add publication",
"# note that the publications can be comma-delimited,",
"# like: ZDB-PUB-100719-4,ZDB-PUB-130703-22",
"if",
"publication",
"!=",
"''",
":",
"pubs",
"=",
"re",
".",
"split",
"(",
"r','",
",",
"publication",
".",
"strip",
"(",
")",
")",
"for",
"pub",
"in",
"pubs",
":",
"pub_id",
"=",
"'ZFIN:'",
"+",
"pub",
".",
"strip",
"(",
")",
"ref",
"=",
"Reference",
"(",
"graph",
",",
"pub_id",
")",
"ref",
".",
"addRefToGraph",
"(",
")",
"graph",
".",
"addTriple",
"(",
"pub_id",
",",
"self",
".",
"globaltt",
"[",
"'mentions'",
"]",
",",
"reagent_id",
")",
"# Add comment?",
"if",
"note",
"!=",
"''",
":",
"model",
".",
"addComment",
"(",
"reagent_id",
",",
"note",
")",
"# use the variant hash for reagents to list the affected genes",
"if",
"reagent_id",
"not",
"in",
"self",
".",
"variant_loci_genes",
":",
"self",
".",
"variant_loci_genes",
"[",
"reagent_id",
"]",
"=",
"[",
"gene_id",
"]",
"else",
":",
"if",
"gene_id",
"not",
"in",
"self",
".",
"variant_loci_genes",
"[",
"reagent_id",
"]",
":",
"self",
".",
"variant_loci_genes",
"[",
"reagent_id",
"]",
"+=",
"[",
"gene_id",
"]",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with Reagent type %s\"",
",",
"reagent_type",
")",
"return"
] |
This method processes the gene targeting knockdown reagents,
such as morpholinos, talens, and crisprs.
We create triples for the reagents and pass the data into a hash map
for use in the pheno_enviro method.
        Morpholinos work similarly to RNAi.
        TALENs are artificial restriction enzymes
        that can be used for genome editing in situ.
        CRISPRs are knockdown reagents, working similarly to RNAi
        but at the transcriptional level instead of the mRNA level.
You can read more about TALEN and CRISPR techniques in review
[Gaj et al]
http://www.cell.com/trends/biotechnology/abstract/S0167-7799%2813%2900087-5
TODO add sequences
Triples created:
<reagent_id> is a gene_targeting_reagent
<reagent_id> rdfs:label <reagent_symbol>
<reagent_id> has type <reagent_so_id>
<reagent_id> has comment <note>
<publication_id> is an individual
<publication_id> mentions <morpholino_id>
:param reagent_type: should be one of: morph, talen, crispr
:param limit:
:return:
|
[
"This",
"method",
"processes",
"the",
"gene",
"targeting",
"knockdown",
"reagents",
"such",
"as",
"morpholinos",
"talens",
"and",
"crisprs",
".",
"We",
"create",
"triples",
"for",
"the",
"reagents",
"and",
"pass",
"the",
"data",
"into",
"a",
"hash",
"map",
"for",
"use",
"in",
"the",
"pheno_enviro",
"method",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1903-L2014
|
18,371
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN._process_uniprot_ids
|
def _process_uniprot_ids(self, limit=None):
"""
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs.
Triples created:
<zfin_gene_id> a class
<zfin_gene_id> rdfs:label gene_symbol
<uniprot_id> is an Individual
<uniprot_id> has type <polypeptide>
<zfin_gene_id> has_gene_product <uniprot_id>
:param limit:
:return:
"""
LOG.info("Processing UniProt IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['uniprot']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, uniprot_id
# , empty
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
uniprot_id = 'UniProtKB:' + uniprot_id.strip()
geno.addGene(gene_id, gene_symbol)
# TODO: Abstract to one of the model utilities
model.addIndividualToGraph(
uniprot_id, None, self.globaltt['polypeptide'])
graph.addTriple(
gene_id, self.globaltt['has gene product'], uniprot_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with UniProt IDs")
return
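
A sketch of what one row of this mapping becomes, using a hypothetical helper (not dipper's API); the CURIE prefixes follow the code above, while the SO/RO term ids in comments are assumptions for illustration, as are the accessions.

def uniprot_row_to_triples(gene_num, gene_symbol, uniprot_acc):
    gene_id = 'ZFIN:' + gene_num.strip()
    uniprot_id = 'UniProtKB:' + uniprot_acc.strip()
    return [
        (gene_id, 'rdf:type', 'owl:Class'),
        (gene_id, 'rdfs:label', gene_symbol),
        (uniprot_id, 'rdf:type', 'SO:0000104'),  # polypeptide
        (gene_id, 'RO:0002205', uniprot_id),     # has gene product
    ]

# illustrative accessions only
print(uniprot_row_to_triples('ZDB-GENE-000112-47', 'ppardb', 'Q90Z66'))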
|
python
|
def _process_uniprot_ids(self, limit=None):
"""
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs.
Triples created:
<zfin_gene_id> a class
<zfin_gene_id> rdfs:label gene_symbol
<uniprot_id> is an Individual
<uniprot_id> has type <polypeptide>
<zfin_gene_id> has_gene_product <uniprot_id>
:param limit:
:return:
"""
LOG.info("Processing UniProt IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['uniprot']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, uniprot_id
# , empty
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
uniprot_id = 'UniProtKB:' + uniprot_id.strip()
geno.addGene(gene_id, gene_symbol)
# TODO: Abstract to one of the model utilities
model.addIndividualToGraph(
uniprot_id, None, self.globaltt['polypeptide'])
graph.addTriple(
gene_id, self.globaltt['has gene product'], uniprot_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with UniProt IDs")
return
|
[
"def",
"_process_uniprot_ids",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing UniProt IDs\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"line_counter",
"=",
"0",
"model",
"=",
"Model",
"(",
"graph",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'uniprot'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"gene_id",
",",
"gene_so_id",
",",
"gene_symbol",
",",
"uniprot_id",
"# , empty",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"gene_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'gene'",
"]",
":",
"continue",
"gene_id",
"=",
"'ZFIN:'",
"+",
"gene_id",
".",
"strip",
"(",
")",
"uniprot_id",
"=",
"'UniProtKB:'",
"+",
"uniprot_id",
".",
"strip",
"(",
")",
"geno",
".",
"addGene",
"(",
"gene_id",
",",
"gene_symbol",
")",
"# TODO: Abstract to one of the model utilities",
"model",
".",
"addIndividualToGraph",
"(",
"uniprot_id",
",",
"None",
",",
"self",
".",
"globaltt",
"[",
"'polypeptide'",
"]",
")",
"graph",
".",
"addTriple",
"(",
"gene_id",
",",
"self",
".",
"globaltt",
"[",
"'has gene product'",
"]",
",",
"uniprot_id",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with UniProt IDs\"",
")",
"return"
] |
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs.
Triples created:
<zfin_gene_id> a class
<zfin_gene_id> rdfs:label gene_symbol
<uniprot_id> is an Individual
<uniprot_id> has type <polypeptide>
<zfin_gene_id> has_gene_product <uniprot_id>
:param limit:
:return:
|
[
"This",
"method",
"processes",
"the",
"mappings",
"from",
"ZFIN",
"gene",
"IDs",
"to",
"UniProtKB",
"IDs",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L2235-L2287
|
18,372
|
monarch-initiative/dipper
|
dipper/sources/ZFIN.py
|
ZFIN.get_orthology_evidence_code
|
def get_orthology_evidence_code(self, abbrev):
'''
        move to localtt & globaltt
'''
# AA Amino acid sequence comparison.
# CE Coincident expression.
# CL Conserved genome location (synteny).
# FC Functional complementation.
# FH Formation of functional heteropolymers.
# IX Immunological cross-reaction.
# NS Not specified.
# NT Nucleotide sequence comparison.
# SI Similar response to inhibitors.
# SL Similar subcellular location.
# SS Similar substrate specificity.
# SU Similar subunit structure.
# XH Cross-hybridization to same molecular probe.
# PT Phylogenetic Tree.
# OT Other
eco_abbrev_map = {
'AA': 'ECO:0000031', # BLAST protein sequence similarity evidence
'CE': 'ECO:0000008', # expression evidence
'CL': 'ECO:0000044', # sequence similarity FIXME
'FC': 'ECO:0000012', # functional complementation
# functional complementation in a heterologous system
'FH': 'ECO:0000064',
'IX': 'ECO:0000040', # immunological assay evidence
'NS': None,
'NT': 'ECO:0000032', # nucleotide blast
'SI': 'ECO:0000094', # biological assay evidence FIXME
'SL': 'ECO:0000122', # protein localization evidence FIXME
'SS': 'ECO:0000024', # protein binding evidence FIXME
'SU': 'ECO:0000027', # structural similarity evidence
'XH': 'ECO:0000002', # direct assay evidence FIXME
'PT': 'ECO:0000080', # phylogenetic evidence
'OT': None,
}
if abbrev not in eco_abbrev_map:
LOG.warning("Evidence code for orthology (%s) not mapped", str(abbrev))
return eco_abbrev_map.get(abbrev)
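
A condensed usage sketch of the lookup above (two entries kept, names reused for illustration): dict.get() returns None both for deliberately unmapped codes ('NS', 'OT') and for unknown abbreviations, so the explicit membership test is what separates "known but unmapped" from "unrecognized", which also logs a warning.

import logging
LOG = logging.getLogger(__name__)

eco_abbrev_map = {'AA': 'ECO:0000031', 'NS': None}

def evidence_code(abbrev):
    if abbrev not in eco_abbrev_map:
        LOG.warning("Evidence code for orthology (%s) not mapped", abbrev)
    return eco_abbrev_map.get(abbrev)

assert evidence_code('AA') == 'ECO:0000031'
assert evidence_code('NS') is None  # known, intentionally unmapped
assert evidence_code('ZZ') is None  # unknown: warning logged, None returned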
|
python
|
def get_orthology_evidence_code(self, abbrev):
'''
        move to localtt & globaltt
'''
# AA Amino acid sequence comparison.
# CE Coincident expression.
# CL Conserved genome location (synteny).
# FC Functional complementation.
# FH Formation of functional heteropolymers.
# IX Immunological cross-reaction.
# NS Not specified.
# NT Nucleotide sequence comparison.
# SI Similar response to inhibitors.
# SL Similar subcellular location.
# SS Similar substrate specificity.
# SU Similar subunit structure.
# XH Cross-hybridization to same molecular probe.
# PT Phylogenetic Tree.
# OT Other
eco_abbrev_map = {
'AA': 'ECO:0000031', # BLAST protein sequence similarity evidence
'CE': 'ECO:0000008', # expression evidence
'CL': 'ECO:0000044', # sequence similarity FIXME
'FC': 'ECO:0000012', # functional complementation
# functional complementation in a heterologous system
'FH': 'ECO:0000064',
'IX': 'ECO:0000040', # immunological assay evidence
'NS': None,
'NT': 'ECO:0000032', # nucleotide blast
'SI': 'ECO:0000094', # biological assay evidence FIXME
'SL': 'ECO:0000122', # protein localization evidence FIXME
'SS': 'ECO:0000024', # protein binding evidence FIXME
'SU': 'ECO:0000027', # structural similarity evidence
'XH': 'ECO:0000002', # direct assay evidence FIXME
'PT': 'ECO:0000080', # phylogenetic evidence
'OT': None,
}
if abbrev not in eco_abbrev_map:
LOG.warning("Evidence code for orthology (%s) not mapped", str(abbrev))
return eco_abbrev_map.get(abbrev)
|
[
"def",
"get_orthology_evidence_code",
"(",
"self",
",",
"abbrev",
")",
":",
"# AA\tAmino acid sequence comparison.",
"# CE\tCoincident expression.",
"# CL\tConserved genome location (synteny).",
"# FC\tFunctional complementation.",
"# FH\tFormation of functional heteropolymers.",
"# IX\tImmunological cross-reaction.",
"# NS\tNot specified.",
"# NT\tNucleotide sequence comparison.",
"# SI\tSimilar response to inhibitors.",
"# SL\tSimilar subcellular location.",
"# SS\tSimilar substrate specificity.",
"# SU\tSimilar subunit structure.",
"# XH\tCross-hybridization to same molecular probe.",
"# PT\tPhylogenetic Tree.",
"# OT Other",
"eco_abbrev_map",
"=",
"{",
"'AA'",
":",
"'ECO:0000031'",
",",
"# BLAST protein sequence similarity evidence",
"'CE'",
":",
"'ECO:0000008'",
",",
"# expression evidence",
"'CL'",
":",
"'ECO:0000044'",
",",
"# sequence similarity FIXME",
"'FC'",
":",
"'ECO:0000012'",
",",
"# functional complementation",
"# functional complementation in a heterologous system",
"'FH'",
":",
"'ECO:0000064'",
",",
"'IX'",
":",
"'ECO:0000040'",
",",
"# immunological assay evidence",
"'NS'",
":",
"None",
",",
"'NT'",
":",
"'ECO:0000032'",
",",
"# nucleotide blast",
"'SI'",
":",
"'ECO:0000094'",
",",
"# biological assay evidence FIXME",
"'SL'",
":",
"'ECO:0000122'",
",",
"# protein localization evidence FIXME",
"'SS'",
":",
"'ECO:0000024'",
",",
"# protein binding evidence FIXME",
"'SU'",
":",
"'ECO:0000027'",
",",
"# structural similarity evidence",
"'XH'",
":",
"'ECO:0000002'",
",",
"# direct assay evidence FIXME",
"'PT'",
":",
"'ECO:0000080'",
",",
"# phylogenetic evidence",
"'OT'",
":",
"None",
",",
"}",
"if",
"abbrev",
"not",
"in",
"eco_abbrev_map",
":",
"LOG",
".",
"warning",
"(",
"\"Evidence code for orthology (%s) not mapped\"",
",",
"str",
"(",
"abbrev",
")",
")",
"return",
"eco_abbrev_map",
".",
"get",
"(",
"abbrev",
")"
] |
move to localtt & globaltt
|
[
"move",
"to",
"localtt",
"&",
"globltt"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L2798-L2840
|
18,373
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_diseases
|
def _process_diseases(self, limit=None):
"""
This method processes the KEGG disease IDs.
Triples created:
<disease_id> is a class
<disease_id> rdfs:label <disease_name>
:param limit:
:return:
"""
LOG.info("Processing diseases")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['disease']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(disease_id, disease_name) = row
disease_id = 'KEGG-'+disease_id.strip()
if disease_id not in self.label_hash:
self.label_hash[disease_id] = disease_name
if self.test_mode and disease_id not in self.test_ids['disease']:
continue
# Add the disease as a class.
# we don't get all of these from MONDO yet see:
# https://github.com/monarch-initiative/human-disease-ontology/issues/3
model.addClassToGraph(disease_id, disease_name)
# not typing the diseases as DOID:4 yet because
# I don't want to bulk up the graph unnecessarily
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with diseases")
return
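
A minimal sketch of the row-limit convention shared by these parsers: in test mode the limit is ignored so the curated test IDs are always reached; otherwise iteration stops once line_counter exceeds `limit`. Note the check runs after processing, so limit=1 still consumes two rows. The helper is hypothetical, written only to make the convention concrete.

def take_rows(rows, limit=None, test_mode=False):
    for line_counter, row in enumerate(rows, start=1):
        yield row
        if not test_mode and limit is not None and line_counter > limit:
            break

rows = [('D1', 'one'), ('D2', 'two'), ('D3', 'three')]
assert len(list(take_rows(rows, limit=1))) == 2            # off-by-one by design
assert len(list(take_rows(rows, limit=1, test_mode=True))) == 3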
|
python
|
def _process_diseases(self, limit=None):
"""
This method processes the KEGG disease IDs.
Triples created:
<disease_id> is a class
<disease_id> rdfs:label <disease_name>
:param limit:
:return:
"""
LOG.info("Processing diseases")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
raw = '/'.join((self.rawdir, self.files['disease']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(disease_id, disease_name) = row
disease_id = 'KEGG-'+disease_id.strip()
if disease_id not in self.label_hash:
self.label_hash[disease_id] = disease_name
if self.test_mode and disease_id not in self.test_ids['disease']:
continue
# Add the disease as a class.
# we don't get all of these from MONDO yet see:
# https://github.com/monarch-initiative/human-disease-ontology/issues/3
model.addClassToGraph(disease_id, disease_name)
# not typing the diseases as DOID:4 yet because
# I don't want to bulk up the graph unnecessarily
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with diseases")
return
|
[
"def",
"_process_diseases",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing diseases\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"line_counter",
"=",
"0",
"model",
"=",
"Model",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'disease'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"disease_id",
",",
"disease_name",
")",
"=",
"row",
"disease_id",
"=",
"'KEGG-'",
"+",
"disease_id",
".",
"strip",
"(",
")",
"if",
"disease_id",
"not",
"in",
"self",
".",
"label_hash",
":",
"self",
".",
"label_hash",
"[",
"disease_id",
"]",
"=",
"disease_name",
"if",
"self",
".",
"test_mode",
"and",
"disease_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'disease'",
"]",
":",
"continue",
"# Add the disease as a class.",
"# we don't get all of these from MONDO yet see:",
"# https://github.com/monarch-initiative/human-disease-ontology/issues/3",
"model",
".",
"addClassToGraph",
"(",
"disease_id",
",",
"disease_name",
")",
"# not typing the diseases as DOID:4 yet because",
"# I don't want to bulk up the graph unnecessarily",
"if",
"not",
"self",
".",
"test_mode",
"and",
"(",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
")",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with diseases\"",
")",
"return"
] |
This method processes the KEGG disease IDs.
Triples created:
<disease_id> is a class
<disease_id> rdfs:label <disease_name>
:param limit:
:return:
|
[
"This",
"method",
"processes",
"the",
"KEGG",
"disease",
"IDs",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L224-L269
|
18,374
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_genes
|
def _process_genes(self, limit=None):
"""
This method processes the KEGG gene IDs.
The label for the gene is pulled as
the first symbol in the list of gene symbols;
the rest are added as synonyms.
The long-form of the gene name is added as a definition.
        This is hardcoded to process only human genes.
Triples created:
<gene_id> is a SO:gene
<gene_id> rdfs:label <gene_name>
:param limit:
:return:
"""
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
family = Family(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['hsa_genes']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_name) = row
gene_id = 'KEGG-'+gene_id.strip()
# the gene listing has a bunch of labels
# that are delimited, as:
# DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT,
# EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin
# it looks like the list is semicolon delimited
# (symbol, name, gene_class)
# where the symbol is a comma-delimited list
# here, we split them up.
# we will take the first abbreviation and make it the symbol
# then take the rest as synonyms
                gene_stuff = re.split(r';', gene_name)
symbollist = re.split(r',', gene_stuff[0])
first_symbol = symbollist[0].strip()
if gene_id not in self.label_hash:
self.label_hash[gene_id] = first_symbol
if self.test_mode and gene_id not in self.test_ids['genes']:
continue
# Add the gene as a class.
geno.addGene(gene_id, first_symbol)
# add the long name as the description
if len(gene_stuff) > 1:
description = gene_stuff[1].strip()
model.addDefinition(gene_id, description)
                # add the rest of the symbols as synonyms
                for synonym in symbollist[1:]:
                    model.addSynonym(gene_id, synonym.strip())
if len(gene_stuff) > 2:
ko_part = gene_stuff[2]
                    ko_match = re.search(r'(K\d+)', ko_part)
                    if ko_match is not None and len(ko_match.groups()) == 1:
                        ko = 'KEGG-ko:' + ko_match.group(1)
family.addMemberOf(gene_id, ko)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with genes")
return
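
A demonstration of the corrected label parsing above, run on the example from the code comments. The raw-string semicolon pattern and the capturing group around the KO id are the two fixes that make it work.

import re

gene_name = ('DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT, '
             'EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin')

gene_stuff = re.split(r';', gene_name)
symbollist = [s.strip() for s in re.split(r',', gene_stuff[0])]

first_symbol = symbollist[0]         # 'DST' becomes the rdfs:label
synonyms = symbollist[1:]            # the remaining symbols become synonyms
description = gene_stuff[1].strip()  # 'dystonin' becomes the definition

ko_match = re.search(r'(K\d+)', gene_stuff[2])
assert first_symbol == 'DST' and description == 'dystonin'
assert ko_match.group(1) == 'K10382'  # -> KEGG-ko:K10382 gene family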
|
python
|
def _process_genes(self, limit=None):
"""
This method processes the KEGG gene IDs.
The label for the gene is pulled as
the first symbol in the list of gene symbols;
the rest are added as synonyms.
The long-form of the gene name is added as a definition.
        This is hardcoded to process only human genes.
Triples created:
<gene_id> is a SO:gene
<gene_id> rdfs:label <gene_name>
:param limit:
:return:
"""
LOG.info("Processing genes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
family = Family(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['hsa_genes']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_name) = row
gene_id = 'KEGG-'+gene_id.strip()
# the gene listing has a bunch of labels
# that are delimited, as:
# DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT,
# EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin
# it looks like the list is semicolon delimited
# (symbol, name, gene_class)
# where the symbol is a comma-delimited list
# here, we split them up.
# we will take the first abbreviation and make it the symbol
# then take the rest as synonyms
                gene_stuff = re.split(r';', gene_name)
symbollist = re.split(r',', gene_stuff[0])
first_symbol = symbollist[0].strip()
if gene_id not in self.label_hash:
self.label_hash[gene_id] = first_symbol
if self.test_mode and gene_id not in self.test_ids['genes']:
continue
# Add the gene as a class.
geno.addGene(gene_id, first_symbol)
# add the long name as the description
if len(gene_stuff) > 1:
description = gene_stuff[1].strip()
model.addDefinition(gene_id, description)
                # add the rest of the symbols as synonyms
                for synonym in symbollist[1:]:
                    model.addSynonym(gene_id, synonym.strip())
if len(gene_stuff) > 2:
ko_part = gene_stuff[2]
                    ko_match = re.search(r'(K\d+)', ko_part)
                    if ko_match is not None and len(ko_match.groups()) == 1:
                        ko = 'KEGG-ko:' + ko_match.group(1)
family.addMemberOf(gene_id, ko)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with genes")
return
|
[
"def",
"_process_genes",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing genes\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"family",
"=",
"Family",
"(",
"graph",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'hsa_genes'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"gene_id",
",",
"gene_name",
")",
"=",
"row",
"gene_id",
"=",
"'KEGG-'",
"+",
"gene_id",
".",
"strip",
"(",
")",
"# the gene listing has a bunch of labels",
"# that are delimited, as:",
"# DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT,",
"# EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin",
"# it looks like the list is semicolon delimited",
"# (symbol, name, gene_class)",
"# where the symbol is a comma-delimited list",
"# here, we split them up.",
"# we will take the first abbreviation and make it the symbol",
"# then take the rest as synonyms",
"gene_stuff",
"=",
"re",
".",
"split",
"(",
"'r;'",
",",
"gene_name",
")",
"symbollist",
"=",
"re",
".",
"split",
"(",
"r','",
",",
"gene_stuff",
"[",
"0",
"]",
")",
"first_symbol",
"=",
"symbollist",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"gene_id",
"not",
"in",
"self",
".",
"label_hash",
":",
"self",
".",
"label_hash",
"[",
"gene_id",
"]",
"=",
"first_symbol",
"if",
"self",
".",
"test_mode",
"and",
"gene_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'genes'",
"]",
":",
"continue",
"# Add the gene as a class.",
"geno",
".",
"addGene",
"(",
"gene_id",
",",
"first_symbol",
")",
"# add the long name as the description",
"if",
"len",
"(",
"gene_stuff",
")",
">",
"1",
":",
"description",
"=",
"gene_stuff",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"model",
".",
"addDefinition",
"(",
"gene_id",
",",
"description",
")",
"# add the rest of the symbols as synonyms",
"for",
"i",
"in",
"enumerate",
"(",
"symbollist",
",",
"start",
"=",
"1",
")",
":",
"model",
".",
"addSynonym",
"(",
"gene_id",
",",
"i",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"if",
"len",
"(",
"gene_stuff",
")",
">",
"2",
":",
"ko_part",
"=",
"gene_stuff",
"[",
"2",
"]",
"ko_match",
"=",
"re",
".",
"search",
"(",
"r'K\\d+'",
",",
"ko_part",
")",
"if",
"ko_match",
"is",
"not",
"None",
"and",
"len",
"(",
"ko_match",
".",
"groups",
"(",
")",
")",
"==",
"1",
":",
"ko",
"=",
"'KEGG-ko:'",
"+",
"ko_match",
".",
"group",
"(",
"1",
")",
"family",
".",
"addMemberOf",
"(",
"gene_id",
",",
"ko",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with genes\"",
")",
"return"
] |
This method processes the KEGG gene IDs.
The label for the gene is pulled as
the first symbol in the list of gene symbols;
the rest are added as synonyms.
The long-form of the gene name is added as a definition.
        This is hardcoded to process only human genes.
Triples created:
<gene_id> is a SO:gene
<gene_id> rdfs:label <gene_name>
:param limit:
:return:
|
[
"This",
"method",
"processes",
"the",
"KEGG",
"gene",
"IDs",
".",
"The",
"label",
"for",
"the",
"gene",
"is",
"pulled",
"as",
"the",
"first",
"symbol",
"in",
"the",
"list",
"of",
"gene",
"symbols",
";",
"the",
"rest",
"are",
"added",
"as",
"synonyms",
".",
"The",
"long",
"-",
"form",
"of",
"the",
"gene",
"name",
"is",
"added",
"as",
"a",
"definition",
".",
"This",
"is",
"hardcoded",
"to",
"just",
"processes",
"human",
"genes",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L271-L352
|
18,375
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_ortholog_classes
|
def _process_ortholog_classes(self, limit=None):
"""
        This method adds the KEGG orthology classes to the graph.
If there's an embedded enzyme commission number,
that is added as an xref.
Triples created:
<orthology_class_id> is a class
<orthology_class_id> has label <orthology_symbols>
<orthology_class_id> has description <orthology_description>
:param limit:
:return:
"""
LOG.info("Processing ortholog classes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(orthology_class_id, orthology_class_name) = row
if self.test_mode and orthology_class_id \
not in self.test_ids['orthology_classes']:
continue
# The orthology class is essentially a KEGG gene ID
# that is species agnostic.
# Add the ID and label as a gene family class
other_labels = re.split(r'[;,]', orthology_class_name)
# the first one is the label we'll use
orthology_label = other_labels[0]
orthology_class_id = 'KEGG-'+orthology_class_id.strip()
orthology_type = self.globaltt['gene_family']
model.addClassToGraph(
orthology_class_id, orthology_label, orthology_type)
                if len(other_labels) > 1:
                    # add the rest as synonyms, skipping the first (it is the label)
                    for s in other_labels[1:]:
                        model.addSynonym(orthology_class_id, s.strip())
                    # add the last one as the description
                    d = other_labels[-1]
                    model.addDescription(orthology_class_id, d)
                    # add the enzyme commission number (EC:1.2.99.5) as an xref
                    # sometimes there's two, like [EC:1.3.5.1 1.3.5.4]
                    # can also have a dash, like EC:1.10.3.-
                    ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d)
                    if ec_matches:  # re.findall returns a list, never None
                        for ecm in ec_matches:
                            model.addXref(orthology_class_id, 'EC:' + ecm)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with ortholog classes")
return
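
A demonstration of the EC-number extraction above, using the same pattern. The {5,7} quantifier counts alternation matches, not characters, so a four-field EC such as 1.2.99.5 is seven tokens: 1 . 2 . 99 . 5.

import re

ec_pattern = re.compile(r'((?:\d+|\.|-){5,7})')

assert ec_pattern.findall('oxidase [EC:1.2.99.5]') == ['1.2.99.5']
assert ec_pattern.findall('[EC:1.3.5.1 1.3.5.4]') == ['1.3.5.1', '1.3.5.4']  # two codes
assert ec_pattern.findall('oxidase [EC:1.10.3.-]') == ['1.10.3.-']           # dash field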
|
python
|
def _process_ortholog_classes(self, limit=None):
"""
        This method adds the KEGG orthology classes to the graph.
If there's an embedded enzyme commission number,
that is added as an xref.
Triples created:
<orthology_class_id> is a class
<orthology_class_id> has label <orthology_symbols>
<orthology_class_id> has description <orthology_description>
:param limit:
:return:
"""
LOG.info("Processing ortholog classes")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(orthology_class_id, orthology_class_name) = row
if self.test_mode and orthology_class_id \
not in self.test_ids['orthology_classes']:
continue
# The orthology class is essentially a KEGG gene ID
# that is species agnostic.
# Add the ID and label as a gene family class
other_labels = re.split(r'[;,]', orthology_class_name)
# the first one is the label we'll use
orthology_label = other_labels[0]
orthology_class_id = 'KEGG-'+orthology_class_id.strip()
orthology_type = self.globaltt['gene_family']
model.addClassToGraph(
orthology_class_id, orthology_label, orthology_type)
                if len(other_labels) > 1:
                    # add the rest as synonyms, skipping the first (it is the label)
                    for s in other_labels[1:]:
                        model.addSynonym(orthology_class_id, s.strip())
                    # add the last one as the description
                    d = other_labels[-1]
                    model.addDescription(orthology_class_id, d)
                    # add the enzyme commission number (EC:1.2.99.5) as an xref
                    # sometimes there's two, like [EC:1.3.5.1 1.3.5.4]
                    # can also have a dash, like EC:1.10.3.-
                    ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d)
                    if ec_matches:  # re.findall returns a list, never None
                        for ecm in ec_matches:
                            model.addXref(orthology_class_id, 'EC:' + ecm)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with ortholog classes")
return
|
[
"def",
"_process_ortholog_classes",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing ortholog classes\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'ortholog_classes'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"orthology_class_id",
",",
"orthology_class_name",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"orthology_class_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'orthology_classes'",
"]",
":",
"continue",
"# The orthology class is essentially a KEGG gene ID",
"# that is species agnostic.",
"# Add the ID and label as a gene family class",
"other_labels",
"=",
"re",
".",
"split",
"(",
"r'[;,]'",
",",
"orthology_class_name",
")",
"# the first one is the label we'll use",
"orthology_label",
"=",
"other_labels",
"[",
"0",
"]",
"orthology_class_id",
"=",
"'KEGG-'",
"+",
"orthology_class_id",
".",
"strip",
"(",
")",
"orthology_type",
"=",
"self",
".",
"globaltt",
"[",
"'gene_family'",
"]",
"model",
".",
"addClassToGraph",
"(",
"orthology_class_id",
",",
"orthology_label",
",",
"orthology_type",
")",
"if",
"len",
"(",
"other_labels",
")",
">",
"1",
":",
"# add the rest as synonyms",
"# todo skip the first",
"for",
"s",
"in",
"other_labels",
":",
"model",
".",
"addSynonym",
"(",
"orthology_class_id",
",",
"s",
".",
"strip",
"(",
")",
")",
"# add the last one as the description",
"d",
"=",
"other_labels",
"[",
"len",
"(",
"other_labels",
")",
"-",
"1",
"]",
"model",
".",
"addDescription",
"(",
"orthology_class_id",
",",
"d",
")",
"# add the enzyme commission number (EC:1.2.99.5)as an xref",
"# sometimes there's two, like [EC:1.3.5.1 1.3.5.4]",
"# can also have a dash, like EC:1.10.3.-",
"ec_matches",
"=",
"re",
".",
"findall",
"(",
"r'((?:\\d+|\\.|-){5,7})'",
",",
"d",
")",
"if",
"ec_matches",
"is",
"not",
"None",
":",
"for",
"ecm",
"in",
"ec_matches",
":",
"model",
".",
"addXref",
"(",
"orthology_class_id",
",",
"'EC:'",
"+",
"ecm",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with ortholog classes\"",
")",
"return"
] |
This method adds the KEGG orthology classes to the graph.
If there's an embedded enzyme commission number,
that is added as an xref.
Triples created:
<orthology_class_id> is a class
<orthology_class_id> has label <orthology_symbols>
<orthology_class_id> has description <orthology_description>
:param limit:
:return:
|
[
"This",
"method",
"add",
"the",
"KEGG",
"orthology",
"classes",
"to",
"the",
"graph",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L354-L423
|
18,376
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_orthologs
|
def _process_orthologs(self, raw, limit=None):
"""
This method maps orthologs for a species to the KEGG orthology classes.
Triples created:
<gene_id> is a class
<orthology_class_id> is a class
<assoc_id> has subject <gene_id>
<assoc_id> has object <orthology_class_id>
:param limit:
:return:
"""
LOG.info("Processing orthologs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, orthology_class_id) = row
orthology_class_id = 'KEGG:'+orthology_class_id.strip()
gene_id = 'KEGG:' + gene_id.strip()
# note that the panther_id references a group of orthologs,
# and is not 1:1 with the rest
# add the KO id as a gene-family grouping class
OrthologyAssoc(
graph, self.name, gene_id, None).add_gene_family_to_graph(
orthology_class_id)
# add gene and orthology class to graph;
# assume labels will be taken care of elsewhere
model.addClassToGraph(gene_id, None)
model.addClassToGraph(orthology_class_id, None)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with orthologs")
return
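
A sketch of the KO grouping emitted above: gene and orthology class both become classes, and the gene is tied to the KO id acting as a gene-family grouping node. The helper, the property CURIE in the comment, and the accessions are all illustrative assumptions, not dipper's internals.

def ko_family_triples(gene_acc, ko_acc):
    gene_id = 'KEGG:' + gene_acc.strip()
    ko_id = 'KEGG:' + ko_acc.strip()
    return [
        (gene_id, 'rdf:type', 'owl:Class'),
        (ko_id, 'rdf:type', 'owl:Class'),
        (gene_id, 'RO:0002350', ko_id),  # member of (the gene-family grouping)
    ]

print(ko_family_triples('hsa:3791', 'ko:K05121'))  # illustrative accessions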
|
python
|
def _process_orthologs(self, raw, limit=None):
"""
This method maps orthologs for a species to the KEGG orthology classes.
Triples created:
<gene_id> is a class
<orthology_class_id> is a class
<assoc_id> has subject <gene_id>
<assoc_id> has object <orthology_class_id>
:param limit:
:return:
"""
LOG.info("Processing orthologs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, orthology_class_id) = row
orthology_class_id = 'KEGG:'+orthology_class_id.strip()
gene_id = 'KEGG:' + gene_id.strip()
# note that the panther_id references a group of orthologs,
# and is not 1:1 with the rest
# add the KO id as a gene-family grouping class
OrthologyAssoc(
graph, self.name, gene_id, None).add_gene_family_to_graph(
orthology_class_id)
# add gene and orthology class to graph;
# assume labels will be taken care of elsewhere
model.addClassToGraph(gene_id, None)
model.addClassToGraph(orthology_class_id, None)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with orthologs")
return
|
[
"def",
"_process_orthologs",
"(",
"self",
",",
"raw",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing orthologs\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"gene_id",
",",
"orthology_class_id",
")",
"=",
"row",
"orthology_class_id",
"=",
"'KEGG:'",
"+",
"orthology_class_id",
".",
"strip",
"(",
")",
"gene_id",
"=",
"'KEGG:'",
"+",
"gene_id",
".",
"strip",
"(",
")",
"# note that the panther_id references a group of orthologs,",
"# and is not 1:1 with the rest",
"# add the KO id as a gene-family grouping class",
"OrthologyAssoc",
"(",
"graph",
",",
"self",
".",
"name",
",",
"gene_id",
",",
"None",
")",
".",
"add_gene_family_to_graph",
"(",
"orthology_class_id",
")",
"# add gene and orthology class to graph;",
"# assume labels will be taken care of elsewhere",
"model",
".",
"addClassToGraph",
"(",
"gene_id",
",",
"None",
")",
"model",
".",
"addClassToGraph",
"(",
"orthology_class_id",
",",
"None",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with orthologs\"",
")",
"return"
] |
This method maps orthologs for a species to the KEGG orthology classes.
Triples created:
<gene_id> is a class
<orthology_class_id> is a class
<assoc_id> has subject <gene_id>
<assoc_id> has object <orthology_class_id>
:param limit:
:return:
|
[
"This",
"method",
"maps",
"orthologs",
"for",
"a",
"species",
"to",
"the",
"KEGG",
"orthology",
"classes",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L425-L473
|
18,377
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_kegg_disease2gene
|
def _process_kegg_disease2gene(self, limit=None):
"""
This method creates an association between diseases and
their associated genes. We are being conservative here, and only
processing those diseases for which there is no mapping to OMIM.
Triples created:
<alternate_locus> is an Individual
<alternate_locus> has type <variant_locus>
<alternate_locus> is an allele of <gene_id>
<assoc_id> has subject <disease_id>
<assoc_id> has object <gene_id>
:param limit:
:return:
"""
LOG.info("Processing KEGG disease to gene")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno = Genotype(graph)
rel = self.globaltt['is marker for']
noomimset = set()
raw = '/'.join((self.rawdir, self.files['disease_gene']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, disease_id) = row
if self.test_mode and gene_id not in self.test_ids['genes']:
continue
gene_id = 'KEGG-' + gene_id.strip()
disease_id = 'KEGG-' + disease_id.strip()
# only add diseases for which
# there is no omim id and not a grouping class
if disease_id not in self.kegg_disease_hash:
# add as a class
disease_label = None
if disease_id in self.label_hash:
disease_label = self.label_hash[disease_id]
if re.search(r'includ', str(disease_label)):
# they use 'including' when it's a grouping class
LOG.info(
"Skipping this association because " +
"it's a grouping class: %s",
disease_label)
continue
# type this disease_id as a disease
model.addClassToGraph(disease_id, disease_label)
# , class_type=self.globaltt['disease'])
noomimset.add(disease_id)
alt_locus_id = self._make_variant_locus_id(gene_id, disease_id)
alt_label = self.label_hash[alt_locus_id]
model.addIndividualToGraph(
alt_locus_id, alt_label, self.globaltt['variant_locus'])
geno.addAffectedLocus(alt_locus_id, gene_id)
model.addBlankNodeAnnotation(alt_locus_id)
# Add the disease to gene relationship.
assoc = G2PAssoc(graph, self.name, alt_locus_id, disease_id, rel)
assoc.add_association_to_graph()
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with KEGG disease to gene")
LOG.info("Found %d diseases with no omim id", len(noomimset))
return
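
A sketch of the grouping-class guard above: KEGG disease labels containing "including" denote umbrella entries, so associations to them are skipped rather than typed as diseases. The ids and labels below are illustrative only.

import re

labels = {
    'KEGG-ds:H00001': 'B-cell acute lymphoblastic leukemia',
    'KEGG-ds:H99999': 'Anemia, including Fanconi anemia',
}

kept = [d for d, label in labels.items() if not re.search(r'includ', str(label))]
assert kept == ['KEGG-ds:H00001']  # the umbrella entry is filtered out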
|
python
|
def _process_kegg_disease2gene(self, limit=None):
"""
This method creates an association between diseases and
their associated genes. We are being conservative here, and only
processing those diseases for which there is no mapping to OMIM.
Triples created:
<alternate_locus> is an Individual
<alternate_locus> has type <variant_locus>
<alternate_locus> is an allele of <gene_id>
<assoc_id> has subject <disease_id>
<assoc_id> has object <gene_id>
:param limit:
:return:
"""
LOG.info("Processing KEGG disease to gene")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno = Genotype(graph)
rel = self.globaltt['is marker for']
noomimset = set()
raw = '/'.join((self.rawdir, self.files['disease_gene']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, disease_id) = row
if self.test_mode and gene_id not in self.test_ids['genes']:
continue
gene_id = 'KEGG-' + gene_id.strip()
disease_id = 'KEGG-' + disease_id.strip()
# only add diseases for which
# there is no omim id and not a grouping class
if disease_id not in self.kegg_disease_hash:
# add as a class
disease_label = None
if disease_id in self.label_hash:
disease_label = self.label_hash[disease_id]
if re.search(r'includ', str(disease_label)):
# they use 'including' when it's a grouping class
LOG.info(
"Skipping this association because " +
"it's a grouping class: %s",
disease_label)
continue
# type this disease_id as a disease
model.addClassToGraph(disease_id, disease_label)
# , class_type=self.globaltt['disease'])
noomimset.add(disease_id)
alt_locus_id = self._make_variant_locus_id(gene_id, disease_id)
alt_label = self.label_hash[alt_locus_id]
model.addIndividualToGraph(
alt_locus_id, alt_label, self.globaltt['variant_locus'])
geno.addAffectedLocus(alt_locus_id, gene_id)
model.addBlankNodeAnnotation(alt_locus_id)
# Add the disease to gene relationship.
assoc = G2PAssoc(graph, self.name, alt_locus_id, disease_id, rel)
assoc.add_association_to_graph()
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with KEGG disease to gene")
LOG.info("Found %d diseases with no omim id", len(noomimset))
return
|
[
"def",
"_process_kegg_disease2gene",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing KEGG disease to gene\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"rel",
"=",
"self",
".",
"globaltt",
"[",
"'is marker for'",
"]",
"noomimset",
"=",
"set",
"(",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'disease_gene'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"gene_id",
",",
"disease_id",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"gene_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'genes'",
"]",
":",
"continue",
"gene_id",
"=",
"'KEGG-'",
"+",
"gene_id",
".",
"strip",
"(",
")",
"disease_id",
"=",
"'KEGG-'",
"+",
"disease_id",
".",
"strip",
"(",
")",
"# only add diseases for which",
"# there is no omim id and not a grouping class",
"if",
"disease_id",
"not",
"in",
"self",
".",
"kegg_disease_hash",
":",
"# add as a class",
"disease_label",
"=",
"None",
"if",
"disease_id",
"in",
"self",
".",
"label_hash",
":",
"disease_label",
"=",
"self",
".",
"label_hash",
"[",
"disease_id",
"]",
"if",
"re",
".",
"search",
"(",
"r'includ'",
",",
"str",
"(",
"disease_label",
")",
")",
":",
"# they use 'including' when it's a grouping class",
"LOG",
".",
"info",
"(",
"\"Skipping this association because \"",
"+",
"\"it's a grouping class: %s\"",
",",
"disease_label",
")",
"continue",
"# type this disease_id as a disease",
"model",
".",
"addClassToGraph",
"(",
"disease_id",
",",
"disease_label",
")",
"# , class_type=self.globaltt['disease'])",
"noomimset",
".",
"add",
"(",
"disease_id",
")",
"alt_locus_id",
"=",
"self",
".",
"_make_variant_locus_id",
"(",
"gene_id",
",",
"disease_id",
")",
"alt_label",
"=",
"self",
".",
"label_hash",
"[",
"alt_locus_id",
"]",
"model",
".",
"addIndividualToGraph",
"(",
"alt_locus_id",
",",
"alt_label",
",",
"self",
".",
"globaltt",
"[",
"'variant_locus'",
"]",
")",
"geno",
".",
"addAffectedLocus",
"(",
"alt_locus_id",
",",
"gene_id",
")",
"model",
".",
"addBlankNodeAnnotation",
"(",
"alt_locus_id",
")",
"# Add the disease to gene relationship.",
"assoc",
"=",
"G2PAssoc",
"(",
"graph",
",",
"self",
".",
"name",
",",
"alt_locus_id",
",",
"disease_id",
",",
"rel",
")",
"assoc",
".",
"add_association_to_graph",
"(",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"(",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
")",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with KEGG disease to gene\"",
")",
"LOG",
".",
"info",
"(",
"\"Found %d diseases with no omim id\"",
",",
"len",
"(",
"noomimset",
")",
")",
"return"
] |
This method creates an association between diseases and
their associated genes. We are being conservative here, and only
processing those diseases for which there is no mapping to OMIM.
Triples created:
<alternate_locus> is an Individual
<alternate_locus> has type <variant_locus>
<alternate_locus> is an allele of <gene_id>
<assoc_id> has subject <disease_id>
<assoc_id> has object <gene_id>
:param limit:
:return:
|
[
"This",
"method",
"creates",
"an",
"association",
"between",
"diseases",
"and",
"their",
"associated",
"genes",
".",
"We",
"are",
"being",
"conservative",
"here",
"and",
"only",
"processing",
"those",
"diseases",
"for",
"which",
"there",
"is",
"no",
"mapping",
"to",
"OMIM",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L475-L551
|
18,378
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_omim2gene
|
def _process_omim2gene(self, limit=None):
"""
        This method maps OMIM IDs to KEGG gene IDs.
        Processing is currently split based on the link_type field.
Equivalent link types are mapped as gene XRefs.
Reverse link types are mapped as disease to gene associations.
Original link types are currently skipped.
Triples created:
<kegg_gene_id> is a Gene
<omim_gene_id> is a Gene
        <kegg_gene_id> hasXref <omim_gene_id>
<assoc_id> has subject <omim_disease_id>
<assoc_id> has object <kegg_gene_id>
:param limit:
:return:
"""
LOG.info("Processing OMIM to KEGG gene")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['omim2gene']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(kegg_gene_id, omim_id, link_type) = row
if self.test_mode and kegg_gene_id not in self.test_ids['genes']:
continue
kegg_gene_id = 'KEGG-' + kegg_gene_id.strip()
omim_id = re.sub(r'omim', 'OMIM', omim_id)
if link_type == 'equivalent':
# these are genes!
# so add them as a class then make equivalence
model.addClassToGraph(omim_id, None)
geno.addGene(kegg_gene_id, None)
if not DipperUtil.is_omim_disease(omim_id):
model.addEquivalentClass(kegg_gene_id, omim_id)
elif link_type == 'reverse':
# make an association between an OMIM ID & the KEGG gene ID
# we do this with omim ids because
# they are more atomic than KEGG ids
alt_locus_id = self._make_variant_locus_id(kegg_gene_id, omim_id)
alt_label = self.label_hash[alt_locus_id]
model.addIndividualToGraph(
alt_locus_id, alt_label, self.globaltt['variant_locus'])
geno.addAffectedLocus(alt_locus_id, kegg_gene_id)
model.addBlankNodeAnnotation(alt_locus_id)
# Add the disease to gene relationship.
rel = self.globaltt['is marker for']
assoc = G2PAssoc(graph, self.name, alt_locus_id, omim_id, rel)
assoc.add_association_to_graph()
elif link_type == 'original':
# these are sometimes a gene, and sometimes a disease
LOG.info(
'Unable to handle original link for %s-%s',
kegg_gene_id, omim_id)
else:
# don't know what these are
LOG.warning(
'Unhandled link type for %s-%s: %s',
kegg_gene_id, omim_id, link_type)
if (not self.test_mode) and (
limit is not None and line_counter > limit):
break
LOG.info("Done with OMIM to KEGG gene")
return
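
A condensed sketch of the link_type dispatch above; the return tuples are stand-ins for the graph calls the method actually makes, and the ids are illustrative.

def handle_omim_link(kegg_gene_id, omim_id, link_type):
    if link_type == 'equivalent':
        return ('equivalentClass', kegg_gene_id, omim_id)  # gene x-ref
    if link_type == 'reverse':
        return ('is marker for', kegg_gene_id, omim_id)    # disease-to-gene assoc
    if link_type == 'original':
        return None  # ambiguous (gene or disease): logged and skipped
    raise ValueError('unhandled link type: %s' % link_type)

assert handle_omim_link('KEGG-hsa:1017', 'OMIM:116953', 'reverse')[0] == 'is marker for'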
|
python
|
def _process_omim2gene(self, limit=None):
"""
        This method maps OMIM IDs to KEGG gene IDs.
        Processing is currently split based on the link_type field.
Equivalent link types are mapped as gene XRefs.
Reverse link types are mapped as disease to gene associations.
Original link types are currently skipped.
Triples created:
<kegg_gene_id> is a Gene
<omim_gene_id> is a Gene
        <kegg_gene_id> hasXref <omim_gene_id>
<assoc_id> has subject <omim_disease_id>
<assoc_id> has object <kegg_gene_id>
:param limit:
:return:
"""
LOG.info("Processing OMIM to KEGG gene")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['omim2gene']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(kegg_gene_id, omim_id, link_type) = row
if self.test_mode and kegg_gene_id not in self.test_ids['genes']:
continue
kegg_gene_id = 'KEGG-' + kegg_gene_id.strip()
omim_id = re.sub(r'omim', 'OMIM', omim_id)
if link_type == 'equivalent':
# these are genes!
# so add them as a class then make equivalence
model.addClassToGraph(omim_id, None)
geno.addGene(kegg_gene_id, None)
if not DipperUtil.is_omim_disease(omim_id):
model.addEquivalentClass(kegg_gene_id, omim_id)
elif link_type == 'reverse':
# make an association between an OMIM ID & the KEGG gene ID
# we do this with omim ids because
# they are more atomic than KEGG ids
alt_locus_id = self._make_variant_locus_id(kegg_gene_id, omim_id)
alt_label = self.label_hash[alt_locus_id]
model.addIndividualToGraph(
alt_locus_id, alt_label, self.globaltt['variant_locus'])
geno.addAffectedLocus(alt_locus_id, kegg_gene_id)
model.addBlankNodeAnnotation(alt_locus_id)
# Add the disease to gene relationship.
rel = self.globaltt['is marker for']
assoc = G2PAssoc(graph, self.name, alt_locus_id, omim_id, rel)
assoc.add_association_to_graph()
elif link_type == 'original':
# these are sometimes a gene, and sometimes a disease
LOG.info(
'Unable to handle original link for %s-%s',
kegg_gene_id, omim_id)
else:
# don't know what these are
LOG.warning(
'Unhandled link type for %s-%s: %s',
kegg_gene_id, omim_id, link_type)
if (not self.test_mode) and (
limit is not None and line_counter > limit):
break
LOG.info("Done with OMIM to KEGG gene")
return
|
[
"def",
"_process_omim2gene",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing OMIM to KEGG gene\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'omim2gene'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"kegg_gene_id",
",",
"omim_id",
",",
"link_type",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"kegg_gene_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'genes'",
"]",
":",
"continue",
"kegg_gene_id",
"=",
"'KEGG-'",
"+",
"kegg_gene_id",
".",
"strip",
"(",
")",
"omim_id",
"=",
"re",
".",
"sub",
"(",
"r'omim'",
",",
"'OMIM'",
",",
"omim_id",
")",
"if",
"link_type",
"==",
"'equivalent'",
":",
"# these are genes!",
"# so add them as a class then make equivalence",
"model",
".",
"addClassToGraph",
"(",
"omim_id",
",",
"None",
")",
"geno",
".",
"addGene",
"(",
"kegg_gene_id",
",",
"None",
")",
"if",
"not",
"DipperUtil",
".",
"is_omim_disease",
"(",
"omim_id",
")",
":",
"model",
".",
"addEquivalentClass",
"(",
"kegg_gene_id",
",",
"omim_id",
")",
"elif",
"link_type",
"==",
"'reverse'",
":",
"# make an association between an OMIM ID & the KEGG gene ID",
"# we do this with omim ids because",
"# they are more atomic than KEGG ids",
"alt_locus_id",
"=",
"self",
".",
"_make_variant_locus_id",
"(",
"kegg_gene_id",
",",
"omim_id",
")",
"alt_label",
"=",
"self",
".",
"label_hash",
"[",
"alt_locus_id",
"]",
"model",
".",
"addIndividualToGraph",
"(",
"alt_locus_id",
",",
"alt_label",
",",
"self",
".",
"globaltt",
"[",
"'variant_locus'",
"]",
")",
"geno",
".",
"addAffectedLocus",
"(",
"alt_locus_id",
",",
"kegg_gene_id",
")",
"model",
".",
"addBlankNodeAnnotation",
"(",
"alt_locus_id",
")",
"# Add the disease to gene relationship.",
"rel",
"=",
"self",
".",
"globaltt",
"[",
"'is marker for'",
"]",
"assoc",
"=",
"G2PAssoc",
"(",
"graph",
",",
"self",
".",
"name",
",",
"alt_locus_id",
",",
"omim_id",
",",
"rel",
")",
"assoc",
".",
"add_association_to_graph",
"(",
")",
"elif",
"link_type",
"==",
"'original'",
":",
"# these are sometimes a gene, and sometimes a disease",
"LOG",
".",
"info",
"(",
"'Unable to handle original link for %s-%s'",
",",
"kegg_gene_id",
",",
"omim_id",
")",
"else",
":",
"# don't know what these are",
"LOG",
".",
"warning",
"(",
"'Unhandled link type for %s-%s: %s'",
",",
"kegg_gene_id",
",",
"omim_id",
",",
"link_type",
")",
"if",
"(",
"not",
"self",
".",
"test_mode",
")",
"and",
"(",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
")",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with OMIM to KEGG gene\"",
")",
"return"
] |
This method maps the OMIM IDs to KEGG gene IDs.
Currently split based on the link_type field.
Equivalent link types are mapped as equivalent classes, unless the OMIM id is a disease.
Reverse link types are mapped as disease to gene associations.
Original link types are currently skipped.
Triples created:
<kegg_gene_id> is a Gene
<omim_gene_id> is a Gene
<kegg_gene_id> equivalentClass <omim_gene_id>
<assoc_id> has subject <omim_disease_id>
<assoc_id> has object <kegg_gene_id>
:param limit:
:return:
|
[
"This",
"method",
"maps",
"the",
"OMIM",
"IDs",
"and",
"KEGG",
"gene",
"ID",
".",
"Currently",
"split",
"based",
"on",
"the",
"link_type",
"field",
".",
"Equivalent",
"link",
"types",
"are",
"mapped",
"as",
"gene",
"XRefs",
".",
"Reverse",
"link",
"types",
"are",
"mapped",
"as",
"disease",
"to",
"gene",
"associations",
".",
"Original",
"link",
"types",
"are",
"currently",
"skipped",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L553-L634
|
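A minimal, self-contained sketch of the link_type dispatch performed by the record above; the sample rows and action tags are made up, and only the three-column omim2gene layout (kegg_gene_id, omim_id, link_type) is assumed:

import csv
import io
import re

# Hypothetical rows mirroring the assumed tab-separated layout.
SAMPLE = "hsa:5888\tomim:179617\tequivalent\nhsa:673\tomim:114500\treverse\n"

def dispatch_links(tsv_text):
    """Yield (action, kegg_gene_id, omim_id) with normalized prefixes."""
    reader = csv.reader(io.StringIO(tsv_text), delimiter='\t')
    for kegg_gene_id, omim_id, link_type in reader:
        kegg_gene_id = 'KEGG-' + kegg_gene_id.strip()  # same prefixing as above
        omim_id = re.sub(r'omim', 'OMIM', omim_id)     # omim:nnn -> OMIM:nnn
        if link_type == 'equivalent':
            yield ('equivalence', kegg_gene_id, omim_id)
        elif link_type == 'reverse':
            yield ('g2p-association', kegg_gene_id, omim_id)
        else:
            yield ('skipped', kegg_gene_id, omim_id)

for action, kegg, omim in dispatch_links(SAMPLE):
    print(action, kegg, omim)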
18,379
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_genes_kegg2ncbi
|
def _process_genes_kegg2ncbi(self, limit=None):
"""
This method maps the KEGG human gene IDs
to the corresponding NCBI Gene IDs.
Triples created:
<kegg_gene_id> is a class
<ncbi_gene_id> is a class
<kegg_gene_id> equivalentClass <ncbi_gene_id>
:param limit:
:return:
"""
LOG.info("Processing KEGG gene IDs to NCBI gene IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['ncbi']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(kegg_gene_id, ncbi_gene_id, link_type) = row
if self.test_mode and kegg_gene_id not in self.test_ids['genes']:
continue
# Adjust the NCBI gene ID prefix.
ncbi_gene_id = re.sub(r'ncbi-geneid', 'NCBIGene', ncbi_gene_id)
kegg_gene_id = 'KEGG-' + kegg_gene_id
# Adding the KEGG gene ID to the graph here is redundant,
# unless there happens to be additional gene IDs in this table
# not present in the genes table.
model.addClassToGraph(kegg_gene_id, None)
model.addClassToGraph(ncbi_gene_id, None)
model.addEquivalentClass(kegg_gene_id, ncbi_gene_id)
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with KEGG gene IDs to NCBI gene IDs")
return
|
python
|
def _process_genes_kegg2ncbi(self, limit=None):
"""
This method maps the KEGG human gene IDs
to the corresponding NCBI Gene IDs.
Triples created:
<kegg_gene_id> is a class
<ncbi_gene_id> is a class
<kegg_gene_id> equivalentClass <ncbi_gene_id>
:param limit:
:return:
"""
LOG.info("Processing KEGG gene IDs to NCBI gene IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, self.files['ncbi']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(kegg_gene_id, ncbi_gene_id, link_type) = row
if self.test_mode and kegg_gene_id not in self.test_ids['genes']:
continue
# Adjust the NCBI gene ID prefix.
ncbi_gene_id = re.sub(r'ncbi-geneid', 'NCBIGene', ncbi_gene_id)
kegg_gene_id = 'KEGG-' + kegg_gene_id
# Adding the KEGG gene ID to the graph here is redundant,
# unless there happens to be additional gene IDs in this table
# not present in the genes table.
model.addClassToGraph(kegg_gene_id, None)
model.addClassToGraph(ncbi_gene_id, None)
model.addEquivalentClass(kegg_gene_id, ncbi_gene_id)
if not self.test_mode and (
limit is not None and line_counter > limit):
break
LOG.info("Done with KEGG gene IDs to NCBI gene IDs")
return
|
[
"def",
"_process_genes_kegg2ncbi",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing KEGG gene IDs to NCBI gene IDs\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'ncbi'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"kegg_gene_id",
",",
"ncbi_gene_id",
",",
"link_type",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"kegg_gene_id",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'genes'",
"]",
":",
"continue",
"# Adjust the NCBI gene ID prefix.",
"ncbi_gene_id",
"=",
"re",
".",
"sub",
"(",
"r'ncbi-geneid'",
",",
"'NCBIGene'",
",",
"ncbi_gene_id",
")",
"kegg_gene_id",
"=",
"'KEGG-'",
"+",
"kegg_gene_id",
"# Adding the KEGG gene ID to the graph here is redundant,",
"# unless there happens to be additional gene IDs in this table",
"# not present in the genes table.",
"model",
".",
"addClassToGraph",
"(",
"kegg_gene_id",
",",
"None",
")",
"model",
".",
"addClassToGraph",
"(",
"ncbi_gene_id",
",",
"None",
")",
"model",
".",
"addEquivalentClass",
"(",
"kegg_gene_id",
",",
"ncbi_gene_id",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"(",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
")",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with KEGG gene IDs to NCBI gene IDs\"",
")",
"return"
] |
This method maps the KEGG human gene IDs
to the corresponding NCBI Gene IDs.
Triples created:
<kegg_gene_id> is a class
<ncbi_gene_id> is a class
<kegg_gene_id> equivalentClass <ncbi_gene_id>
:param limit:
:return:
|
[
"This",
"method",
"maps",
"the",
"KEGG",
"human",
"gene",
"IDs",
"to",
"the",
"corresponding",
"NCBI",
"Gene",
"IDs",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L706-L754
|
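The prefix rewriting in the record above reduces to a small dictionary build. A sketch over made-up rows in the assumed (kegg_gene_id, ncbi_gene_id, link_type) layout:

import re

rows = [
    ('hsa:7157', 'ncbi-geneid:7157', 'equivalent'),
    ('hsa:1956', 'ncbi-geneid:1956', 'equivalent'),
]

equivalents = {}
for kegg_gene_id, ncbi_gene_id, _link_type in rows:
    # The same two prefix adjustments the method performs.
    ncbi_gene_id = re.sub(r'ncbi-geneid', 'NCBIGene', ncbi_gene_id)
    kegg_gene_id = 'KEGG-' + kegg_gene_id
    equivalents[kegg_gene_id] = ncbi_gene_id

print(equivalents)  # {'KEGG-hsa:7157': 'NCBIGene:7157', 'KEGG-hsa:1956': 'NCBIGene:1956'}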
18,380
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._process_pathway_disease
|
def _process_pathway_disease(self, limit):
"""
We make a link between the pathway identifiers,
and any diseases associated with them.
Since we model diseases as processes, we make a triple saying that
the pathway may be causally upstream of or within the disease process.
:param limit:
:return:
"""
LOG.info("Processing KEGG pathways to disease ids")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
raw = '/'.join((self.rawdir, self.files['pathway_disease']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(disease_id, kegg_pathway_num) = row
if self.test_mode and kegg_pathway_num not in self.test_ids['pathway']:
continue
disease_id = 'KEGG-' + disease_id
# will look like KEGG-path:map04130 or KEGG-path:hsa04130
pathway_id = 'KEGG-' + kegg_pathway_num
graph.addTriple(
pathway_id,
self.globaltt['causally upstream of or within'],
disease_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
python
|
def _process_pathway_disease(self, limit):
"""
We make a link between the pathway identifiers,
and any diseases associated with them.
Since we model diseases as processes, we make a triple saying that
the pathway may be causally upstream of or within the disease process.
:param limit:
:return:
"""
LOG.info("Processing KEGG pathways to disease ids")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
raw = '/'.join((self.rawdir, self.files['pathway_disease']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(disease_id, kegg_pathway_num) = row
if self.test_mode and kegg_pathway_num not in self.test_ids['pathway']:
continue
disease_id = 'KEGG-' + disease_id
# will look like KEGG-path:map04130 or KEGG-path:hsa04130
pathway_id = 'KEGG-' + kegg_pathway_num
graph.addTriple(
pathway_id,
self.globaltt['causally upstream of or within'],
disease_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
[
"def",
"_process_pathway_disease",
"(",
"self",
",",
"limit",
")",
":",
"LOG",
".",
"info",
"(",
"\"Processing KEGG pathways to disease ids\"",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'pathway_disease'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"disease_id",
",",
"kegg_pathway_num",
")",
"=",
"row",
"if",
"self",
".",
"test_mode",
"and",
"kegg_pathway_num",
"not",
"in",
"self",
".",
"test_ids",
"[",
"'pathway'",
"]",
":",
"continue",
"disease_id",
"=",
"'KEGG-'",
"+",
"disease_id",
"# will look like KEGG-path:map04130 or KEGG-path:hsa04130",
"pathway_id",
"=",
"'KEGG-'",
"+",
"kegg_pathway_num",
"graph",
".",
"addTriple",
"(",
"pathway_id",
",",
"self",
".",
"globaltt",
"[",
"'causally upstream of or within'",
"]",
",",
"disease_id",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"return"
] |
We make a link between the pathway identifiers,
and any diseases associated with them.
Since we model diseases as processes, we make a triple saying that
the pathway may be causally upstream of or within the disease process.
:param limit:
:return:
|
[
"We",
"make",
"a",
"link",
"between",
"the",
"pathway",
"identifiers",
"and",
"any",
"diseases",
"associated",
"with",
"them",
".",
"Since",
"we",
"model",
"diseases",
"as",
"processes",
"we",
"make",
"a",
"triple",
"saying",
"that",
"the",
"pathway",
"may",
"be",
"causally",
"upstream",
"of",
"or",
"within",
"the",
"disease",
"process",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L792-L832
|
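The record above emits one triple per pathway-disease row. A stand-in that yields (subject, predicate, object) tuples instead of writing to a graph; the demo row is made up, and the predicate CURIE is an assumption (dipper resolves 'causally upstream of or within' via its translation table):

PRED = 'RO:0002418'  # assumed CURIE for 'causally upstream of or within'

def pathway_disease_triples(rows):
    for disease_id, kegg_pathway_num in rows:
        # Both ids get the KEGG- prefix, as in the method above.
        yield ('KEGG-' + kegg_pathway_num, PRED, 'KEGG-' + disease_id)

print(list(pathway_disease_triples([('ds:H00409', 'path:hsa04130')])))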
18,381
|
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
KEGG._make_variant_locus_id
|
def _make_variant_locus_id(self, gene_id, disease_id):
"""
We actually want the association between the gene and the disease
    to be via an alternate locus, not the "wildtype" gene itself.
    So we make an anonymous alternate locus,
    and put that in the association.
    We also make the label for the anonymous class,
    and add it to the label hash.
:param gene_id:
:param disease_id:
:return:
"""
alt_locus_id = '_:'+re.sub(
r':', '', gene_id) + '-' + re.sub(r':', '', disease_id) + 'VL'
alt_label = self.label_hash.get(gene_id)
disease_label = self.label_hash.get(disease_id)
if alt_label is not None and alt_label != '':
alt_label = 'some variant of ' + str(alt_label)
if disease_label is not None and disease_label != '':
alt_label += ' that is associated with ' + str(disease_label)
else:
alt_label = None
self.label_hash[alt_locus_id] = alt_label
return alt_locus_id
|
python
|
def _make_variant_locus_id(self, gene_id, disease_id):
"""
We actually want the association between the gene and the disease
    to be via an alternate locus, not the "wildtype" gene itself.
    So we make an anonymous alternate locus,
    and put that in the association.
    We also make the label for the anonymous class,
    and add it to the label hash.
:param gene_id:
:param disease_id:
:return:
"""
alt_locus_id = '_:'+re.sub(
r':', '', gene_id) + '-' + re.sub(r':', '', disease_id) + 'VL'
alt_label = self.label_hash.get(gene_id)
disease_label = self.label_hash.get(disease_id)
if alt_label is not None and alt_label != '':
alt_label = 'some variant of ' + str(alt_label)
if disease_label is not None and disease_label != '':
alt_label += ' that is associated with ' + str(disease_label)
else:
alt_label = None
self.label_hash[alt_locus_id] = alt_label
return alt_locus_id
|
[
"def",
"_make_variant_locus_id",
"(",
"self",
",",
"gene_id",
",",
"disease_id",
")",
":",
"alt_locus_id",
"=",
"'_:'",
"+",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"gene_id",
")",
"+",
"'-'",
"+",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"disease_id",
")",
"+",
"'VL'",
"alt_label",
"=",
"self",
".",
"label_hash",
".",
"get",
"(",
"gene_id",
")",
"disease_label",
"=",
"self",
".",
"label_hash",
".",
"get",
"(",
"disease_id",
")",
"if",
"alt_label",
"is",
"not",
"None",
"and",
"alt_label",
"!=",
"''",
":",
"alt_label",
"=",
"'some variant of '",
"+",
"str",
"(",
"alt_label",
")",
"if",
"disease_label",
"is",
"not",
"None",
"and",
"disease_label",
"!=",
"''",
":",
"alt_label",
"+=",
"' that is associated with '",
"+",
"str",
"(",
"disease_label",
")",
"else",
":",
"alt_label",
"=",
"None",
"self",
".",
"label_hash",
"[",
"alt_locus_id",
"]",
"=",
"alt_label",
"return",
"alt_locus_id"
] |
We actually want the association between the gene and the disease
to be via an alternate locus, not the "wildtype" gene itself.
So we make an anonymous alternate locus,
and put that in the association.
We also make the label for the anonymous class,
and add it to the label hash.
:param gene_id:
:param disease_id:
:return:
|
[
"We",
"actually",
"want",
"the",
"association",
"between",
"the",
"gene",
"and",
"the",
"disease",
"to",
"be",
"via",
"an",
"alternate",
"locus",
"not",
"the",
"wildtype",
"gene",
"itself",
".",
"so",
"we",
"make",
"an",
"anonymous",
"alternate",
"locus",
"and",
"put",
"that",
"in",
"the",
"association",
"We",
"also",
"make",
"the",
"label",
"for",
"the",
"anonymous",
"class",
"and",
"add",
"it",
"to",
"the",
"label",
"hash"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L906-L933
|
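A standalone restatement of the blank-node id scheme above, runnable without the class; the gene and disease labels are made up, and plain truthiness stands in for the explicit None/empty-string checks:

import re

def make_variant_locus_id(gene_id, disease_id, label_hash):
    alt_locus_id = ('_:' + re.sub(r':', '', gene_id) + '-'
                    + re.sub(r':', '', disease_id) + 'VL')
    gene_label = label_hash.get(gene_id)
    disease_label = label_hash.get(disease_id)
    if gene_label:
        alt_label = 'some variant of ' + gene_label
        if disease_label:
            alt_label += ' that is associated with ' + disease_label
    else:
        alt_label = None
    label_hash[alt_locus_id] = alt_label
    return alt_locus_id

labels = {'KEGG-hsa:5888': 'RAD51', 'OMIM:114480': 'breast cancer'}  # made up
vl = make_variant_locus_id('KEGG-hsa:5888', 'OMIM:114480', labels)
print(vl, '->', labels[vl])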
18,382
|
monarch-initiative/dipper
|
dipper/sources/CTD.py
|
CTD._fetch_disambiguating_assoc
|
def _fetch_disambiguating_assoc(self):
"""
For any of the items in the chemical-disease association file that have
ambiguous association types we fetch the disambiguated associations
using the batch query API, and store these in a file. Elsewhere, we can
loop through the file and create the appropriate associations.
:return:
"""
disambig_file = '/'.join(
(self.rawdir, self.static_files['publications']['file']))
assoc_file = '/'.join(
(self.rawdir, self.files['chemical_disease_interactions']['file']))
# check if there is a local association file,
# and download if it's dated later than the original intxn file
if os.path.exists(disambig_file):
dfile_dt = os.stat(disambig_file)
afile_dt = os.stat(assoc_file)
if dfile_dt < afile_dt:
LOG.info(
"Local file date before chem-disease assoc file. "
" Downloading...")
else:
LOG.info(
"Local file date after chem-disease assoc file. "
" Skipping download.")
return
all_pubs = set()
dual_evidence = re.compile(r'^marker\/mechanism\|therapeutic$')
# first get all the unique publications
with gzip.open(assoc_file, 'rt') as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
for row in reader:
if re.match(r'^#', ' '.join(row)):
continue
self._check_list_len(row, 10)
(chem_name, chem_id, cas_rn, disease_name, disease_id,
direct_evidence, inferred_gene_symbol, inference_score,
omim_ids, pubmed_ids) = row
if direct_evidence == '' or not \
re.match(dual_evidence, direct_evidence):
continue
if pubmed_ids is not None and pubmed_ids != '':
all_pubs.update(set(re.split(r'\|', pubmed_ids)))
sorted_pubs = sorted(list(all_pubs))
# now in batches of 4000, we fetch the chemical-disease associations
batch_size = 4000
params = {
'inputType': 'reference',
'report': 'diseases_curated',
'format': 'tsv',
'action': 'Download'
}
url = 'http://ctdbase.org/tools/batchQuery.go?q'
start = 0
end = min((batch_size, len(all_pubs))) # get them in batches of 4000
with open(disambig_file, 'wb') as dmbf:
while start < len(sorted_pubs):
params['inputTerms'] = '|'.join(sorted_pubs[start:end])
# fetch the data from url
LOG.info(
'fetching %d (%d-%d) refs: %s',
len(re.split(r'\|', params['inputTerms'])),
start, end, params['inputTerms'])
data = urllib.parse.urlencode(params)
encoding = 'utf-8'
binary_data = data.encode(encoding)
req = urllib.request.Request(url, binary_data)
resp = urllib.request.urlopen(req)
dmbf.write(resp.read())
start = end
end = min((start + batch_size, len(sorted_pubs)))
return
|
python
|
def _fetch_disambiguating_assoc(self):
"""
For any of the items in the chemical-disease association file that have
ambiguous association types we fetch the disambiguated associations
using the batch query API, and store these in a file. Elsewhere, we can
loop through the file and create the appropriate associations.
:return:
"""
disambig_file = '/'.join(
(self.rawdir, self.static_files['publications']['file']))
assoc_file = '/'.join(
(self.rawdir, self.files['chemical_disease_interactions']['file']))
# check if there is a local association file,
# and download if it's dated later than the original intxn file
if os.path.exists(disambig_file):
dfile_dt = os.stat(disambig_file)
afile_dt = os.stat(assoc_file)
if dfile_dt < afile_dt:
LOG.info(
"Local file date before chem-disease assoc file. "
" Downloading...")
else:
LOG.info(
"Local file date after chem-disease assoc file. "
" Skipping download.")
return
all_pubs = set()
dual_evidence = re.compile(r'^marker\/mechanism\|therapeutic$')
# first get all the unique publications
with gzip.open(assoc_file, 'rt') as tsvfile:
reader = csv.reader(tsvfile, delimiter="\t")
for row in reader:
if re.match(r'^#', ' '.join(row)):
continue
self._check_list_len(row, 10)
(chem_name, chem_id, cas_rn, disease_name, disease_id,
direct_evidence, inferred_gene_symbol, inference_score,
omim_ids, pubmed_ids) = row
if direct_evidence == '' or not \
re.match(dual_evidence, direct_evidence):
continue
if pubmed_ids is not None and pubmed_ids != '':
all_pubs.update(set(re.split(r'\|', pubmed_ids)))
sorted_pubs = sorted(list(all_pubs))
# now in batches of 4000, we fetch the chemical-disease associations
batch_size = 4000
params = {
'inputType': 'reference',
'report': 'diseases_curated',
'format': 'tsv',
'action': 'Download'
}
url = 'http://ctdbase.org/tools/batchQuery.go?q'
start = 0
end = min((batch_size, len(all_pubs))) # get them in batches of 4000
with open(disambig_file, 'wb') as dmbf:
while start < len(sorted_pubs):
params['inputTerms'] = '|'.join(sorted_pubs[start:end])
# fetch the data from url
LOG.info(
'fetching %d (%d-%d) refs: %s',
len(re.split(r'\|', params['inputTerms'])),
start, end, params['inputTerms'])
data = urllib.parse.urlencode(params)
encoding = 'utf-8'
binary_data = data.encode(encoding)
req = urllib.request.Request(url, binary_data)
resp = urllib.request.urlopen(req)
dmbf.write(resp.read())
start = end
end = min((start + batch_size, len(sorted_pubs)))
return
|
[
"def",
"_fetch_disambiguating_assoc",
"(",
"self",
")",
":",
"disambig_file",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"static_files",
"[",
"'publications'",
"]",
"[",
"'file'",
"]",
")",
")",
"assoc_file",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'chemical_disease_interactions'",
"]",
"[",
"'file'",
"]",
")",
")",
"# check if there is a local association file,",
"# and download if it's dated later than the original intxn file",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"disambig_file",
")",
":",
"dfile_dt",
"=",
"os",
".",
"stat",
"(",
"disambig_file",
")",
"afile_dt",
"=",
"os",
".",
"stat",
"(",
"assoc_file",
")",
"if",
"dfile_dt",
"<",
"afile_dt",
":",
"LOG",
".",
"info",
"(",
"\"Local file date before chem-disease assoc file. \"",
"\" Downloading...\"",
")",
"else",
":",
"LOG",
".",
"info",
"(",
"\"Local file date after chem-disease assoc file. \"",
"\" Skipping download.\"",
")",
"return",
"all_pubs",
"=",
"set",
"(",
")",
"dual_evidence",
"=",
"re",
".",
"compile",
"(",
"r'^marker\\/mechanism\\|therapeutic$'",
")",
"# first get all the unique publications",
"with",
"gzip",
".",
"open",
"(",
"assoc_file",
",",
"'rt'",
")",
"as",
"tsvfile",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"tsvfile",
",",
"delimiter",
"=",
"\"\\t\"",
")",
"for",
"row",
"in",
"reader",
":",
"if",
"re",
".",
"match",
"(",
"r'^#'",
",",
"' '",
".",
"join",
"(",
"row",
")",
")",
":",
"continue",
"self",
".",
"_check_list_len",
"(",
"row",
",",
"10",
")",
"(",
"chem_name",
",",
"chem_id",
",",
"cas_rn",
",",
"disease_name",
",",
"disease_id",
",",
"direct_evidence",
",",
"inferred_gene_symbol",
",",
"inference_score",
",",
"omim_ids",
",",
"pubmed_ids",
")",
"=",
"row",
"if",
"direct_evidence",
"==",
"''",
"or",
"not",
"re",
".",
"match",
"(",
"dual_evidence",
",",
"direct_evidence",
")",
":",
"continue",
"if",
"pubmed_ids",
"is",
"not",
"None",
"and",
"pubmed_ids",
"!=",
"''",
":",
"all_pubs",
".",
"update",
"(",
"set",
"(",
"re",
".",
"split",
"(",
"r'\\|'",
",",
"pubmed_ids",
")",
")",
")",
"sorted_pubs",
"=",
"sorted",
"(",
"list",
"(",
"all_pubs",
")",
")",
"# now in batches of 4000, we fetch the chemical-disease associations",
"batch_size",
"=",
"4000",
"params",
"=",
"{",
"'inputType'",
":",
"'reference'",
",",
"'report'",
":",
"'diseases_curated'",
",",
"'format'",
":",
"'tsv'",
",",
"'action'",
":",
"'Download'",
"}",
"url",
"=",
"'http://ctdbase.org/tools/batchQuery.go?q'",
"start",
"=",
"0",
"end",
"=",
"min",
"(",
"(",
"batch_size",
",",
"len",
"(",
"all_pubs",
")",
")",
")",
"# get them in batches of 4000",
"with",
"open",
"(",
"disambig_file",
",",
"'wb'",
")",
"as",
"dmbf",
":",
"while",
"start",
"<",
"len",
"(",
"sorted_pubs",
")",
":",
"params",
"[",
"'inputTerms'",
"]",
"=",
"'|'",
".",
"join",
"(",
"sorted_pubs",
"[",
"start",
":",
"end",
"]",
")",
"# fetch the data from url",
"LOG",
".",
"info",
"(",
"'fetching %d (%d-%d) refs: %s'",
",",
"len",
"(",
"re",
".",
"split",
"(",
"r'\\|'",
",",
"params",
"[",
"'inputTerms'",
"]",
")",
")",
",",
"start",
",",
"end",
",",
"params",
"[",
"'inputTerms'",
"]",
")",
"data",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"params",
")",
"encoding",
"=",
"'utf-8'",
"binary_data",
"=",
"data",
".",
"encode",
"(",
"encoding",
")",
"req",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"url",
",",
"binary_data",
")",
"resp",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"req",
")",
"dmbf",
".",
"write",
"(",
"resp",
".",
"read",
"(",
")",
")",
"start",
"=",
"end",
"end",
"=",
"min",
"(",
"(",
"start",
"+",
"batch_size",
",",
"len",
"(",
"sorted_pubs",
")",
")",
")",
"return"
] |
For any of the items in the chemical-disease association file that have
ambiguous association types we fetch the disambiguated associations
using the batch query API, and store these in a file. Elsewhere, we can
loop through the file and create the appropriate associations.
:return:
|
[
"For",
"any",
"of",
"the",
"items",
"in",
"the",
"chemical",
"-",
"disease",
"association",
"file",
"that",
"have",
"ambiguous",
"association",
"types",
"we",
"fetch",
"the",
"disambiguated",
"associations",
"using",
"the",
"batch",
"query",
"API",
"and",
"store",
"these",
"in",
"a",
"file",
".",
"Elsewhere",
"we",
"can",
"loop",
"through",
"the",
"file",
"and",
"create",
"the",
"appropriate",
"associations",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/CTD.py#L243-L323
|
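The start/end window arithmetic from the fetch loop above, isolated into a generator so the batching can be checked without touching the network (batch size shrunk for the demo):

def batches(items, batch_size):
    start, end = 0, min(batch_size, len(items))
    while start < len(items):
        yield items[start:end]
        start = end
        end = min(start + batch_size, len(items))

pubs = sorted({'123', '42', '456', '789'})  # made-up pubmed ids
for chunk in batches(pubs, 3):
    print('|'.join(chunk))  # the joined string becomes params['inputTerms']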
18,383
|
monarch-initiative/dipper
|
dipper/sources/CTD.py
|
CTD._make_association
|
def _make_association(self, subject_id, object_id, rel_id, pubmed_ids):
"""
Make a reified association given an array of pubmed identifiers.
Args:
:param subject_id id of the subject of the association (gene/chem)
:param object_id id of the object of the association (disease)
:param rel_id relationship id
:param pubmed_ids an array of pubmed identifiers
Returns:
:return None
"""
# TODO pass in the relevant Assoc class rather than relying on G2P
assoc = G2PAssoc(self.graph, self.name, subject_id, object_id, rel_id)
if pubmed_ids is not None and len(pubmed_ids) > 0:
for pmid in pubmed_ids:
ref = Reference(
self.graph, pmid, self.globaltt['journal article'])
ref.addRefToGraph()
assoc.add_source(pmid)
assoc.add_evidence(self.globaltt['traceable author statement'])
assoc.add_association_to_graph()
return
|
python
|
def _make_association(self, subject_id, object_id, rel_id, pubmed_ids):
"""
Make a reified association given an array of pubmed identifiers.
Args:
:param subject_id id of the subject of the association (gene/chem)
:param object_id id of the object of the association (disease)
:param rel_id relationship id
:param pubmed_ids an array of pubmed identifiers
Returns:
:return None
"""
# TODO pass in the relevant Assoc class rather than relying on G2P
assoc = G2PAssoc(self.graph, self.name, subject_id, object_id, rel_id)
if pubmed_ids is not None and len(pubmed_ids) > 0:
for pmid in pubmed_ids:
ref = Reference(
self.graph, pmid, self.globaltt['journal article'])
ref.addRefToGraph()
assoc.add_source(pmid)
assoc.add_evidence(self.globaltt['traceable author statement'])
assoc.add_association_to_graph()
return
|
[
"def",
"_make_association",
"(",
"self",
",",
"subject_id",
",",
"object_id",
",",
"rel_id",
",",
"pubmed_ids",
")",
":",
"# TODO pass in the relevant Assoc class rather than relying on G2P",
"assoc",
"=",
"G2PAssoc",
"(",
"self",
".",
"graph",
",",
"self",
".",
"name",
",",
"subject_id",
",",
"object_id",
",",
"rel_id",
")",
"if",
"pubmed_ids",
"is",
"not",
"None",
"and",
"len",
"(",
"pubmed_ids",
")",
">",
"0",
":",
"for",
"pmid",
"in",
"pubmed_ids",
":",
"ref",
"=",
"Reference",
"(",
"self",
".",
"graph",
",",
"pmid",
",",
"self",
".",
"globaltt",
"[",
"'journal article'",
"]",
")",
"ref",
".",
"addRefToGraph",
"(",
")",
"assoc",
".",
"add_source",
"(",
"pmid",
")",
"assoc",
".",
"add_evidence",
"(",
"self",
".",
"globaltt",
"[",
"'traceable author statement'",
"]",
")",
"assoc",
".",
"add_association_to_graph",
"(",
")",
"return"
] |
Make a reified association given an array of pubmed identifiers.
Args:
:param subject_id id of the subject of the association (gene/chem)
:param object_id id of the object of the association (disease)
:param rel_id relationship id
:param pubmed_ids an array of pubmed identifiers
Returns:
:return None
|
[
"Make",
"a",
"reified",
"association",
"given",
"an",
"array",
"of",
"pubmed",
"identifiers",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/CTD.py#L485-L510
|
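A toy stand-in for the reified association: a plain dataclass carrying the pubmed sources and evidence code the way G2PAssoc does above. The demo identifiers are made up, and the ECO CURIE for 'traceable author statement' is an assumption:

from dataclasses import dataclass, field
from typing import List

@dataclass
class Assoc:
    subject: str
    object: str
    relation: str
    sources: List[str] = field(default_factory=list)
    evidence: List[str] = field(default_factory=list)

def make_association(subject_id, object_id, rel_id, pubmed_ids):
    assoc = Assoc(subject_id, object_id, rel_id)
    for pmid in pubmed_ids or []:   # each PMID becomes a source
        assoc.sources.append(pmid)
    assoc.evidence.append('ECO:0000033')  # traceable author statement (assumed)
    return assoc

print(make_association('MESH:D003042', 'OMIM:114480', 'is_marker_for', ['PMID:12345']))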
18,384
|
monarch-initiative/dipper
|
dipper/sources/Bgee.py
|
Bgee.checkIfRemoteIsNewer
|
def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify):
"""
Overrides checkIfRemoteIsNewer in Source class
:param localfile: str file path
:param remote_size: str bytes
:param remote_modify: str last modify date in the form 20160705042714
:return: boolean True if remote file is newer else False
"""
is_remote_newer = False
status = os.stat(localfile)
LOG.info(
"\nLocal file size: %i"
"\nLocal Timestamp: %s",
status[ST_SIZE], datetime.fromtimestamp(status.st_mtime))
remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify)
if remote_dt != datetime.fromtimestamp(status.st_mtime) or \
status[ST_SIZE] != int(remote_size):
is_remote_newer = True
        LOG.info(
            "Object on server has different size %s and/or date %s",
remote_size, remote_dt)
return is_remote_newer
|
python
|
def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify):
"""
Overrides checkIfRemoteIsNewer in Source class
:param localfile: str file path
:param remote_size: str bytes
:param remote_modify: str last modify date in the form 20160705042714
:return: boolean True if remote file is newer else False
"""
is_remote_newer = False
status = os.stat(localfile)
LOG.info(
"\nLocal file size: %i"
"\nLocal Timestamp: %s",
status[ST_SIZE], datetime.fromtimestamp(status.st_mtime))
remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify)
if remote_dt != datetime.fromtimestamp(status.st_mtime) or \
status[ST_SIZE] != int(remote_size):
is_remote_newer = True
        LOG.info(
            "Object on server has different size %s and/or date %s",
remote_size, remote_dt)
return is_remote_newer
|
[
"def",
"checkIfRemoteIsNewer",
"(",
"self",
",",
"localfile",
",",
"remote_size",
",",
"remote_modify",
")",
":",
"is_remote_newer",
"=",
"False",
"status",
"=",
"os",
".",
"stat",
"(",
"localfile",
")",
"LOG",
".",
"info",
"(",
"\"\\nLocal file size: %i\"",
"\"\\nLocal Timestamp: %s\"",
",",
"status",
"[",
"ST_SIZE",
"]",
",",
"datetime",
".",
"fromtimestamp",
"(",
"status",
".",
"st_mtime",
")",
")",
"remote_dt",
"=",
"Bgee",
".",
"_convert_ftp_time_to_iso",
"(",
"remote_modify",
")",
"if",
"remote_dt",
"!=",
"datetime",
".",
"fromtimestamp",
"(",
"status",
".",
"st_mtime",
")",
"or",
"status",
"[",
"ST_SIZE",
"]",
"!=",
"int",
"(",
"remote_size",
")",
":",
"is_remote_newer",
"=",
"True",
"LOG",
".",
"info",
"(",
"\"Object on server is has different size %i and/or date %s\"",
",",
"remote_size",
",",
"remote_dt",
")",
"return",
"is_remote_newer"
] |
Overrides checkIfRemoteIsNewer in Source class
:param localfile: str file path
:param remote_size: str bytes
:param remote_modify: str last modify date in the form 20160705042714
:return: boolean True if remote file is newer else False
|
[
"Overrides",
"checkIfRemoteIsNewer",
"in",
"Source",
"class"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L232-L256
|
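The size-or-mtime mismatch test above, reduced to a pure function; the remote metadata in the demo call is made up:

import os
from datetime import datetime
from stat import ST_SIZE

def remote_is_newer(localfile, remote_size, remote_dt):
    status = os.stat(localfile)
    local_dt = datetime.fromtimestamp(status.st_mtime)
    return local_dt != remote_dt or status[ST_SIZE] != int(remote_size)

print(remote_is_newer(__file__, '12345', datetime(2016, 7, 5, 4, 27, 14)))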
18,385
|
monarch-initiative/dipper
|
dipper/sources/Bgee.py
|
Bgee._convert_ftp_time_to_iso
|
def _convert_ftp_time_to_iso(ftp_time):
"""
Convert datetime in the format 20160705042714 to a datetime object
:return: datetime object
"""
date_time = datetime(
int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]),
int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14]))
return date_time
|
python
|
def _convert_ftp_time_to_iso(ftp_time):
"""
Convert datetime in the format 20160705042714 to a datetime object
:return: datetime object
"""
date_time = datetime(
int(ftp_time[:4]), int(ftp_time[4:6]), int(ftp_time[6:8]),
int(ftp_time[8:10]), int(ftp_time[10:12]), int(ftp_time[12:14]))
return date_time
|
[
"def",
"_convert_ftp_time_to_iso",
"(",
"ftp_time",
")",
":",
"date_time",
"=",
"datetime",
"(",
"int",
"(",
"ftp_time",
"[",
":",
"4",
"]",
")",
",",
"int",
"(",
"ftp_time",
"[",
"4",
":",
"6",
"]",
")",
",",
"int",
"(",
"ftp_time",
"[",
"6",
":",
"8",
"]",
")",
",",
"int",
"(",
"ftp_time",
"[",
"8",
":",
"10",
"]",
")",
",",
"int",
"(",
"ftp_time",
"[",
"10",
":",
"12",
"]",
")",
",",
"int",
"(",
"ftp_time",
"[",
"12",
":",
"14",
"]",
")",
")",
"return",
"date_time"
] |
Convert datetime in the format 20160705042714 to a datetime object
:return: datetime object
|
[
"Convert",
"datetime",
"in",
"the",
"format",
"20160705042714",
"to",
"a",
"datetime",
"object"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L259-L268
|
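The field slicing above is equivalent to a single strptime call, which is the easier form to verify:

from datetime import datetime

def convert_ftp_time(ftp_time):
    # '%Y%m%d%H%M%S' matches the 14-digit form, e.g. 20160705042714.
    return datetime.strptime(ftp_time, '%Y%m%d%H%M%S')

print(convert_ftp_time('20160705042714'))  # 2016-07-05 04:27:14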
18,386
|
monarch-initiative/dipper
|
dipper/sources/EOM.py
|
EOM.fetch
|
def fetch(self, is_dl_forced=False):
'''connection details for DISCO'''
cxn = {}
cxn['host'] = 'nif-db.crbs.ucsd.edu'
cxn['database'] = 'disco_crawler'
cxn['port'] = '5432'
cxn['user'] = config.get_config()['user']['disco']
cxn['password'] = config.get_config()['keys'][cxn['user']]
self.dataset.setFileAccessUrl(
'jdbc:postgresql://'+cxn['host']+':'+cxn['port']+'/'+cxn['database'],
is_object_literal=True)
# process the tables
# self.fetch_from_pgdb(self.tables,cxn,100) #for testing
self.fetch_from_pgdb(self.tables, cxn)
self.get_files(is_dl_forced)
# FIXME: Everything needed for data provenance?
fstat = os.stat('/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')))
filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d")
self.dataset.setVersion(filedate)
return
|
python
|
def fetch(self, is_dl_forced=False):
'''connection details for DISCO'''
cxn = {}
cxn['host'] = 'nif-db.crbs.ucsd.edu'
cxn['database'] = 'disco_crawler'
cxn['port'] = '5432'
cxn['user'] = config.get_config()['user']['disco']
cxn['password'] = config.get_config()['keys'][cxn['user']]
self.dataset.setFileAccessUrl(
'jdbc:postgresql://'+cxn['host']+':'+cxn['port']+'/'+cxn['database'],
is_object_literal=True)
# process the tables
# self.fetch_from_pgdb(self.tables,cxn,100) #for testing
self.fetch_from_pgdb(self.tables, cxn)
self.get_files(is_dl_forced)
# FIXME: Everything needed for data provenance?
fstat = os.stat('/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')))
filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d")
self.dataset.setVersion(filedate)
return
|
[
"def",
"fetch",
"(",
"self",
",",
"is_dl_forced",
"=",
"False",
")",
":",
"cxn",
"=",
"{",
"}",
"cxn",
"[",
"'host'",
"]",
"=",
"'nif-db.crbs.ucsd.edu'",
"cxn",
"[",
"'database'",
"]",
"=",
"'disco_crawler'",
"cxn",
"[",
"'port'",
"]",
"=",
"'5432'",
"cxn",
"[",
"'user'",
"]",
"=",
"config",
".",
"get_config",
"(",
")",
"[",
"'user'",
"]",
"[",
"'disco'",
"]",
"cxn",
"[",
"'password'",
"]",
"=",
"config",
".",
"get_config",
"(",
")",
"[",
"'keys'",
"]",
"[",
"cxn",
"[",
"'user'",
"]",
"]",
"self",
".",
"dataset",
".",
"setFileAccessUrl",
"(",
"'jdbc:postgresql://'",
"+",
"cxn",
"[",
"'host'",
"]",
"+",
"':'",
"+",
"cxn",
"[",
"'port'",
"]",
"+",
"'/'",
"+",
"cxn",
"[",
"'database'",
"]",
",",
"is_object_literal",
"=",
"True",
")",
"# process the tables",
"# self.fetch_from_pgdb(self.tables,cxn,100) #for testing",
"self",
".",
"fetch_from_pgdb",
"(",
"self",
".",
"tables",
",",
"cxn",
")",
"self",
".",
"get_files",
"(",
"is_dl_forced",
")",
"# FIXME: Everything needed for data provenance?",
"fstat",
"=",
"os",
".",
"stat",
"(",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'dvp.pr_nlx_157874_1'",
")",
")",
")",
"filedate",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"fstat",
"[",
"ST_CTIME",
"]",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"self",
".",
"dataset",
".",
"setVersion",
"(",
"filedate",
")",
"return"
] |
connection details for DISCO
|
[
"connection",
"details",
"for",
"DISCO"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EOM.py#L63-L87
|
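The version stamp at the end of fetch() is a date derived from the raw file's change time; the same derivation against a throwaway file:

import os
from datetime import datetime
from stat import ST_CTIME
from tempfile import NamedTemporaryFile

with NamedTemporaryFile(delete=False) as tmp:
    path = tmp.name
fstat = os.stat(path)
filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d")
print(filedate)  # today's date in YYYY-MM-DD
os.unlink(path)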
18,387
|
monarch-initiative/dipper
|
dipper/sources/EOM.py
|
EOM.parse
|
def parse(self, limit=None):
'''
    Override Source.parse inherited via PostgreSQLSource
'''
if limit is not None:
LOG.info("Only parsing first %s rows of each file", limit)
if self.test_only:
self.test_mode = True
LOG.info("Parsing files...")
self._process_nlx_157874_1_view(
'/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')), limit)
self._map_eom_terms(
'/'.join((self.rawdir, self.files['map']['file'])), limit)
LOG.info("Finished parsing.")
# since it's so small,
# we default to copying the entire graph to the test set
self.testgraph = self.graph
return
|
python
|
def parse(self, limit=None):
'''
    Override Source.parse inherited via PostgreSQLSource
'''
if limit is not None:
LOG.info("Only parsing first %s rows of each file", limit)
if self.test_only:
self.test_mode = True
LOG.info("Parsing files...")
self._process_nlx_157874_1_view(
'/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')), limit)
self._map_eom_terms(
'/'.join((self.rawdir, self.files['map']['file'])), limit)
LOG.info("Finished parsing.")
# since it's so small,
# we default to copying the entire graph to the test set
self.testgraph = self.graph
return
|
[
"def",
"parse",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
":",
"LOG",
".",
"info",
"(",
"\"Only parsing first %s rows of each file\"",
",",
"limit",
")",
"if",
"self",
".",
"test_only",
":",
"self",
".",
"test_mode",
"=",
"True",
"LOG",
".",
"info",
"(",
"\"Parsing files...\"",
")",
"self",
".",
"_process_nlx_157874_1_view",
"(",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'dvp.pr_nlx_157874_1'",
")",
")",
",",
"limit",
")",
"self",
".",
"_map_eom_terms",
"(",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'map'",
"]",
"[",
"'file'",
"]",
")",
")",
",",
"limit",
")",
"LOG",
".",
"info",
"(",
"\"Finished parsing.\"",
")",
"# since it's so small,",
"# we default to copying the entire graph to the test set",
"self",
".",
"testgraph",
"=",
"self",
".",
"graph",
"return"
] |
Override Source.parse inherited via PostgreSQLSource
|
[
"Over",
"ride",
"Source",
".",
"parse",
"inherited",
"via",
"PostgreSQLSource"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EOM.py#L89-L113
|
18,388
|
monarch-initiative/dipper
|
dipper/sources/MGI.py
|
MGI._process_gxd_genotype_view
|
def _process_gxd_genotype_view(self, limit=None):
"""
This table indicates the relationship between a genotype
    and its background strain. It leverages the Genotype class methods
to do this.
Makes these triples:
<MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
<MGI:strainid> a GENO:genomic_background
If the genotype id isn't in the hashmap, it adds it here
(but this shouldn't happen):
<MGI:genotypeid> a GENO:genotype
If the strain isn't in the hashmap, it also adds it here with a
monarchized identifier using the unique key of the strain,
formatted like: :_mgistrainkey12345
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
raw = '/'.join((self.rawdir, 'gxd_genotype_view'))
LOG.info("getting genotypes and their backgrounds")
with open(raw, 'r') as f1:
f1.readline() # read the header row; skip
for line in f1:
line = line.rstrip("\n")
line_counter += 1
(genotype_key, strain_key, strain, mgiid) = line.split('\t')
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
if self.idhash['genotype'].get(genotype_key) is None:
# just in case we haven't seen it before,
# catch and add the id mapping here
self.idhash['genotype'][genotype_key] = mgiid
geno.addGenotype(mgiid, None)
# the label is elsewhere...
# need to add the MGI label as a synonym
# if it's in the hash,
# assume that the individual was created elsewhere
strain_id = self.idhash['strain'].get(strain_key)
background_type = self.globaltt['genomic_background']
if strain_id is None or int(strain_key) < 0:
if strain_id is None:
# some of the strains don't have public identifiers!
# so we make one up, and add it to the hash
strain_id = self._makeInternalIdentifier('strain', strain_key)
self.idhash['strain'].update({strain_key: strain_id})
model.addComment(strain_id, "strain_key:" + strain_key)
elif int(strain_key) < 0:
# these are ones that are unidentified/unknown.
# so add instances of each.
strain_id = self._makeInternalIdentifier(
'strain', re.sub(r':', '', str(strain_id)))
strain_id += re.sub(r':', '', str(mgiid))
strain_id = re.sub(r'^_', '_:', strain_id)
strain_id = re.sub(r'::', ':', strain_id)
model.addDescription(
strain_id,
"This genomic background is unknown. " +
"This is a placeholder background for " +
mgiid + ".")
background_type = self.globaltt[
'unspecified_genomic_background']
# add it back to the idhash
LOG.info(
"adding background as internal id: %s %s: %s",
strain_key, strain, strain_id)
geno.addGenomicBackgroundToGenotype(
strain_id, mgiid, background_type)
self.label_hash[strain_id] = strain
# add BG to a hash so we can build the genotype label later
self.geno_bkgd[mgiid] = strain_id
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
python
|
def _process_gxd_genotype_view(self, limit=None):
"""
This table indicates the relationship between a genotype
    and its background strain. It leverages the Genotype class methods
to do this.
Makes these triples:
<MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
<MGI:strainid> a GENO:genomic_background
If the genotype id isn't in the hashmap, it adds it here
(but this shouldn't happen):
<MGI:genotypeid> a GENO:genotype
If the strain isn't in the hashmap, it also adds it here with a
monarchized identifier using the unique key of the strain,
formatted like: :_mgistrainkey12345
:param limit:
:return:
"""
line_counter = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
raw = '/'.join((self.rawdir, 'gxd_genotype_view'))
LOG.info("getting genotypes and their backgrounds")
with open(raw, 'r') as f1:
f1.readline() # read the header row; skip
for line in f1:
line = line.rstrip("\n")
line_counter += 1
(genotype_key, strain_key, strain, mgiid) = line.split('\t')
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
if self.idhash['genotype'].get(genotype_key) is None:
# just in case we haven't seen it before,
# catch and add the id mapping here
self.idhash['genotype'][genotype_key] = mgiid
geno.addGenotype(mgiid, None)
# the label is elsewhere...
# need to add the MGI label as a synonym
# if it's in the hash,
# assume that the individual was created elsewhere
strain_id = self.idhash['strain'].get(strain_key)
background_type = self.globaltt['genomic_background']
if strain_id is None or int(strain_key) < 0:
if strain_id is None:
# some of the strains don't have public identifiers!
# so we make one up, and add it to the hash
strain_id = self._makeInternalIdentifier('strain', strain_key)
self.idhash['strain'].update({strain_key: strain_id})
model.addComment(strain_id, "strain_key:" + strain_key)
elif int(strain_key) < 0:
# these are ones that are unidentified/unknown.
# so add instances of each.
strain_id = self._makeInternalIdentifier(
'strain', re.sub(r':', '', str(strain_id)))
strain_id += re.sub(r':', '', str(mgiid))
strain_id = re.sub(r'^_', '_:', strain_id)
strain_id = re.sub(r'::', ':', strain_id)
model.addDescription(
strain_id,
"This genomic background is unknown. " +
"This is a placeholder background for " +
mgiid + ".")
background_type = self.globaltt[
'unspecified_genomic_background']
# add it back to the idhash
LOG.info(
"adding background as internal id: %s %s: %s",
strain_key, strain, strain_id)
geno.addGenomicBackgroundToGenotype(
strain_id, mgiid, background_type)
self.label_hash[strain_id] = strain
# add BG to a hash so we can build the genotype label later
self.geno_bkgd[mgiid] = strain_id
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
[
"def",
"_process_gxd_genotype_view",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"line_counter",
"=",
"0",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"model",
"=",
"Model",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'gxd_genotype_view'",
")",
")",
"LOG",
".",
"info",
"(",
"\"getting genotypes and their backgrounds\"",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
")",
"as",
"f1",
":",
"f1",
".",
"readline",
"(",
")",
"# read the header row; skip",
"for",
"line",
"in",
"f1",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
"\"\\n\"",
")",
"line_counter",
"+=",
"1",
"(",
"genotype_key",
",",
"strain_key",
",",
"strain",
",",
"mgiid",
")",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"self",
".",
"test_mode",
"is",
"True",
":",
"if",
"int",
"(",
"genotype_key",
")",
"not",
"in",
"self",
".",
"test_keys",
".",
"get",
"(",
"'genotype'",
")",
":",
"continue",
"if",
"self",
".",
"idhash",
"[",
"'genotype'",
"]",
".",
"get",
"(",
"genotype_key",
")",
"is",
"None",
":",
"# just in case we haven't seen it before,",
"# catch and add the id mapping here",
"self",
".",
"idhash",
"[",
"'genotype'",
"]",
"[",
"genotype_key",
"]",
"=",
"mgiid",
"geno",
".",
"addGenotype",
"(",
"mgiid",
",",
"None",
")",
"# the label is elsewhere...",
"# need to add the MGI label as a synonym",
"# if it's in the hash,",
"# assume that the individual was created elsewhere",
"strain_id",
"=",
"self",
".",
"idhash",
"[",
"'strain'",
"]",
".",
"get",
"(",
"strain_key",
")",
"background_type",
"=",
"self",
".",
"globaltt",
"[",
"'genomic_background'",
"]",
"if",
"strain_id",
"is",
"None",
"or",
"int",
"(",
"strain_key",
")",
"<",
"0",
":",
"if",
"strain_id",
"is",
"None",
":",
"# some of the strains don't have public identifiers!",
"# so we make one up, and add it to the hash",
"strain_id",
"=",
"self",
".",
"_makeInternalIdentifier",
"(",
"'strain'",
",",
"strain_key",
")",
"self",
".",
"idhash",
"[",
"'strain'",
"]",
".",
"update",
"(",
"{",
"strain_key",
":",
"strain_id",
"}",
")",
"model",
".",
"addComment",
"(",
"strain_id",
",",
"\"strain_key:\"",
"+",
"strain_key",
")",
"elif",
"int",
"(",
"strain_key",
")",
"<",
"0",
":",
"# these are ones that are unidentified/unknown.",
"# so add instances of each.",
"strain_id",
"=",
"self",
".",
"_makeInternalIdentifier",
"(",
"'strain'",
",",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"str",
"(",
"strain_id",
")",
")",
")",
"strain_id",
"+=",
"re",
".",
"sub",
"(",
"r':'",
",",
"''",
",",
"str",
"(",
"mgiid",
")",
")",
"strain_id",
"=",
"re",
".",
"sub",
"(",
"r'^_'",
",",
"'_:'",
",",
"strain_id",
")",
"strain_id",
"=",
"re",
".",
"sub",
"(",
"r'::'",
",",
"':'",
",",
"strain_id",
")",
"model",
".",
"addDescription",
"(",
"strain_id",
",",
"\"This genomic background is unknown. \"",
"+",
"\"This is a placeholder background for \"",
"+",
"mgiid",
"+",
"\".\"",
")",
"background_type",
"=",
"self",
".",
"globaltt",
"[",
"'unspecified_genomic_background'",
"]",
"# add it back to the idhash",
"LOG",
".",
"info",
"(",
"\"adding background as internal id: %s %s: %s\"",
",",
"strain_key",
",",
"strain",
",",
"strain_id",
")",
"geno",
".",
"addGenomicBackgroundToGenotype",
"(",
"strain_id",
",",
"mgiid",
",",
"background_type",
")",
"self",
".",
"label_hash",
"[",
"strain_id",
"]",
"=",
"strain",
"# add BG to a hash so we can build the genotype label later",
"self",
".",
"geno_bkgd",
"[",
"mgiid",
"]",
"=",
"strain_id",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"return"
] |
This table indicates the relationship between a genotype
and its background strain. It leverages the Genotype class methods
to do this.
Makes these triples:
<MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
<MGI:strainid> a GENO:genomic_background
If the genotype id isn't in the hashmap, it adds it here
(but this shouldn't happen):
<MGI:genotypeid> a GENO:genotype
If the strain isn't in the hashmap, it also adds it here with a
monarchized identifier using the unique key of the strain,
formatted like: :_mgistrainkey12345
:param limit:
:return:
|
[
"This",
"table",
"indicates",
"the",
"relationship",
"between",
"a",
"genotype",
"and",
"it",
"s",
"background",
"strain",
".",
"It",
"leverages",
"the",
"Genotype",
"class",
"methods",
"to",
"do",
"this",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L335-L430
|
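A simplified mirror of the two strain-id fallbacks above (no public identifier, and negative "unknown" keys). make_internal_strain_id is a hypothetical stand-in for _makeInternalIdentifier, its output shape guessed from the docstring:

import re

def make_internal_strain_id(key):
    return '_mgistrainkey' + str(key)  # assumed id shape

def background_id(strain_id, strain_key, mgiid):
    if strain_id is None:
        # strain with no public identifier: mint one from the table key
        strain_id = make_internal_strain_id(strain_key)
    elif int(strain_key) < 0:
        # unknown background: build a per-genotype placeholder node
        strain_id = make_internal_strain_id(re.sub(r':', '', str(strain_id)))
        strain_id += re.sub(r':', '', str(mgiid))
        strain_id = re.sub(r'^_', '_:', strain_id)
        strain_id = re.sub(r'::', ':', strain_id)
    return strain_id

print(background_id(None, 7213, 'MGI:2166310'))
print(background_id('_mgistrainkey-1', -1, 'MGI:2166310'))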
18,389
|
monarch-initiative/dipper
|
dipper/sources/MGI.py
|
MGI._process_gxd_genotype_summary_view
|
def _process_gxd_genotype_summary_view(self, limit=None):
"""
    Add the genotype internal-id-to-mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
    aggregate the vslcs into a hashmap keyed by genotype.
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno_hash = {}
raw = '/'.join((self.rawdir, 'gxd_genotype_summary_view'))
LOG.info("building labels for genotypes")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
for line in f:
line = line.rstrip("\n")
line_counter += 1
(object_key, preferred, mgiid, subtype,
short_description) = line.split('\t')
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('genotype'):
continue
# add the internal genotype to mgi mapping
self.idhash['genotype'][object_key] = mgiid
if preferred == '1':
d = re.sub(r'\,', '/', short_description.strip())
if mgiid not in geno_hash:
geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
'key': object_key}
else:
vslcs = geno_hash[mgiid].get('vslcs')
vslcs.append(d)
else:
pass
# TODO what to do with != preferred
if not self.test_mode and limit is not None and line_counter > limit:
break
# now, loop through the hash and add the genotypes as individuals
# we add the mgi genotype as a synonym
# (we generate our own label later)
geno = Genotype(graph)
for gt in geno_hash:
genotype = geno_hash.get(gt)
gvc = sorted(genotype.get('vslcs'))
label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
geno.addGenotype(gt, None)
model.addComment(gt, self._makeInternalIdentifier(
'genotype', genotype.get('key')))
model.addSynonym(gt, label.strip())
return
|
python
|
def _process_gxd_genotype_summary_view(self, limit=None):
"""
    Add the genotype internal-id-to-mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
    aggregate the vslcs into a hashmap keyed by genotype.
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
geno_hash = {}
raw = '/'.join((self.rawdir, 'gxd_genotype_summary_view'))
LOG.info("building labels for genotypes")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
for line in f:
line = line.rstrip("\n")
line_counter += 1
(object_key, preferred, mgiid, subtype,
short_description) = line.split('\t')
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('genotype'):
continue
# add the internal genotype to mgi mapping
self.idhash['genotype'][object_key] = mgiid
if preferred == '1':
d = re.sub(r'\,', '/', short_description.strip())
if mgiid not in geno_hash:
geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
'key': object_key}
else:
vslcs = geno_hash[mgiid].get('vslcs')
vslcs.append(d)
else:
pass
# TODO what to do with != preferred
if not self.test_mode and limit is not None and line_counter > limit:
break
# now, loop through the hash and add the genotypes as individuals
# we add the mgi genotype as a synonym
# (we generate our own label later)
geno = Genotype(graph)
for gt in geno_hash:
genotype = geno_hash.get(gt)
gvc = sorted(genotype.get('vslcs'))
label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
geno.addGenotype(gt, None)
model.addComment(gt, self._makeInternalIdentifier(
'genotype', genotype.get('key')))
model.addSynonym(gt, label.strip())
return
|
[
"def",
"_process_gxd_genotype_summary_view",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"geno_hash",
"=",
"{",
"}",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'gxd_genotype_summary_view'",
")",
")",
"LOG",
".",
"info",
"(",
"\"building labels for genotypes\"",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
")",
"as",
"f",
":",
"f",
".",
"readline",
"(",
")",
"# read the header row; skip",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
"\"\\n\"",
")",
"line_counter",
"+=",
"1",
"(",
"object_key",
",",
"preferred",
",",
"mgiid",
",",
"subtype",
",",
"short_description",
")",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"self",
".",
"test_mode",
"is",
"True",
":",
"if",
"int",
"(",
"object_key",
")",
"not",
"in",
"self",
".",
"test_keys",
".",
"get",
"(",
"'genotype'",
")",
":",
"continue",
"# add the internal genotype to mgi mapping",
"self",
".",
"idhash",
"[",
"'genotype'",
"]",
"[",
"object_key",
"]",
"=",
"mgiid",
"if",
"preferred",
"==",
"'1'",
":",
"d",
"=",
"re",
".",
"sub",
"(",
"r'\\,'",
",",
"'/'",
",",
"short_description",
".",
"strip",
"(",
")",
")",
"if",
"mgiid",
"not",
"in",
"geno_hash",
":",
"geno_hash",
"[",
"mgiid",
"]",
"=",
"{",
"'vslcs'",
":",
"[",
"d",
"]",
",",
"'subtype'",
":",
"subtype",
",",
"'key'",
":",
"object_key",
"}",
"else",
":",
"vslcs",
"=",
"geno_hash",
"[",
"mgiid",
"]",
".",
"get",
"(",
"'vslcs'",
")",
"vslcs",
".",
"append",
"(",
"d",
")",
"else",
":",
"pass",
"# TODO what to do with != preferred",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"# now, loop through the hash and add the genotypes as individuals",
"# we add the mgi genotype as a synonym",
"# (we generate our own label later)",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"for",
"gt",
"in",
"geno_hash",
":",
"genotype",
"=",
"geno_hash",
".",
"get",
"(",
"gt",
")",
"gvc",
"=",
"sorted",
"(",
"genotype",
".",
"get",
"(",
"'vslcs'",
")",
")",
"label",
"=",
"'; '",
".",
"join",
"(",
"gvc",
")",
"+",
"' ['",
"+",
"genotype",
".",
"get",
"(",
"'subtype'",
")",
"+",
"']'",
"geno",
".",
"addGenotype",
"(",
"gt",
",",
"None",
")",
"model",
".",
"addComment",
"(",
"gt",
",",
"self",
".",
"_makeInternalIdentifier",
"(",
"'genotype'",
",",
"genotype",
".",
"get",
"(",
"'key'",
")",
")",
")",
"model",
".",
"addSynonym",
"(",
"gt",
",",
"label",
".",
"strip",
"(",
")",
")",
"return"
] |
Add the genotype internal-id-to-mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
aggregate the vslcs into a hashmap keyed by genotype.
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
|
[
"Add",
"the",
"genotype",
"internal",
"id",
"to",
"mgiid",
"mapping",
"to",
"the",
"idhashmap",
".",
"Also",
"add",
"them",
"as",
"individuals",
"to",
"the",
"graph",
".",
"We",
"re",
"-",
"format",
"the",
"label",
"to",
"put",
"the",
"background",
"strain",
"in",
"brackets",
"after",
"the",
"gvc",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L432-L503
|
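The two-pass label build above, as a standalone aggregation over made-up rows in the assumed (object_key, preferred, mgiid, subtype, short_description) layout:

import re
from collections import defaultdict

rows = [
    ('1001', '1', 'MGI:2166310', 'Targeted', 'Rad51<tm1>,Rad51<+>'),
    ('1001', '1', 'MGI:2166310', 'Targeted', 'Trp53<tm1>/Trp53<tm1>'),
]

geno_hash = defaultdict(lambda: {'vslcs': [], 'subtype': None})
for _object_key, preferred, mgiid, subtype, short_description in rows:
    if preferred == '1':  # only preferred descriptions contribute
        d = re.sub(r'\,', '/', short_description.strip())
        geno_hash[mgiid]['vslcs'].append(d)
        geno_hash[mgiid]['subtype'] = subtype

for mgiid, genotype in geno_hash.items():
    gvc = sorted(genotype['vslcs'])
    label = '; '.join(gvc) + ' [' + genotype['subtype'] + ']'
    print(mgiid, label)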
18,390
|
monarch-initiative/dipper
|
dipper/sources/MGI.py
|
MGI.process_mgi_relationship_transgene_genes
|
def process_mgi_relationship_transgene_genes(self, limit=None):
"""
Here, we have the relationship between MGI transgene alleles,
and the non-mouse gene ids that are part of them.
We augment the allele with the transgene parts.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting transgene genes")
raw = '/'.join((self.rawdir, 'mgi_relationship_transgene_genes'))
geno = Genotype(graph)
col = [
'rel_key', 'allele_key', 'allele_id', 'allele_label', 'category_key',
'category_name', 'property_key', 'property_name', 'gene_num'
]
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
header = next(filereader)
if header != col:
LOG.error('expected columns: %s\n\tBut got:\n%s', col, header)
for row in filereader:
# rel_key,
allele_key = int(row[col.index('allele_key')])
allele_id = row[col.index('allele_id')]
# allele_label,
# category_key,
# category_name,
# property_key,
# property_name,
gene_num = int(row[col.index('gene_num')])
if self.test_mode and allele_key not in self.test_keys.get('allele')\
and gene_num not in self.test_ids:
continue
gene_id = 'NCBIGene:' + str(gene_num)
# geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])
seqalt_id = self.idhash['seqalt'].get(allele_key)
if seqalt_id is None:
seqalt_id = allele_id
geno.addSequenceDerivesFrom(seqalt_id, gene_id)
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
return
|
python
|
def process_mgi_relationship_transgene_genes(self, limit=None):
"""
Here, we have the relationship between MGI transgene alleles,
and the non-mouse gene ids that are part of them.
We augment the allele with the transgene parts.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting transgene genes")
raw = '/'.join((self.rawdir, 'mgi_relationship_transgene_genes'))
geno = Genotype(graph)
col = [
'rel_key', 'allele_key', 'allele_id', 'allele_label', 'category_key',
'category_name', 'property_key', 'property_name', 'gene_num'
]
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
header = next(filereader)
if header != col:
LOG.error('expected columns: %s\n\tBut got:\n%s', col, header)
for row in filereader:
# rel_key,
allele_key = int(row[col.index('allele_key')])
allele_id = row[col.index('allele_id')]
# allele_label,
# category_key,
# category_name,
# property_key,
# property_name,
gene_num = int(row[col.index('gene_num')])
if self.test_mode and allele_key not in self.test_keys.get('allele')\
and gene_num not in self.test_ids:
continue
gene_id = 'NCBIGene:' + str(gene_num)
# geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])
seqalt_id = self.idhash['seqalt'].get(allele_key)
if seqalt_id is None:
seqalt_id = allele_id
geno.addSequenceDerivesFrom(seqalt_id, gene_id)
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
return
|
[
"def",
"process_mgi_relationship_transgene_genes",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"LOG",
".",
"info",
"(",
"\"getting transgene genes\"",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'mgi_relationship_transgene_genes'",
")",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"col",
"=",
"[",
"'rel_key'",
",",
"'allele_key'",
",",
"'allele_id'",
",",
"'allele_label'",
",",
"'category_key'",
",",
"'category_name'",
",",
"'property_key'",
",",
"'property_name'",
",",
"'gene_num'",
"]",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"utf8\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"header",
"=",
"next",
"(",
"filereader",
")",
"if",
"header",
"!=",
"col",
":",
"LOG",
".",
"error",
"(",
"'expected columns: %s\\n\\tBut got:\\n%s'",
",",
"col",
",",
"header",
")",
"for",
"row",
"in",
"filereader",
":",
"# rel_key,",
"allele_key",
"=",
"int",
"(",
"row",
"[",
"col",
".",
"index",
"(",
"'allele_key'",
")",
"]",
")",
"allele_id",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'allele_id'",
")",
"]",
"# allele_label,",
"# category_key,",
"# category_name,",
"# property_key,",
"# property_name,",
"gene_num",
"=",
"int",
"(",
"row",
"[",
"col",
".",
"index",
"(",
"'gene_num'",
")",
"]",
")",
"if",
"self",
".",
"test_mode",
"and",
"allele_key",
"not",
"in",
"self",
".",
"test_keys",
".",
"get",
"(",
"'allele'",
")",
"and",
"gene_num",
"not",
"in",
"self",
".",
"test_ids",
":",
"continue",
"gene_id",
"=",
"'NCBIGene:'",
"+",
"str",
"(",
"gene_num",
")",
"# geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])",
"seqalt_id",
"=",
"self",
".",
"idhash",
"[",
"'seqalt'",
"]",
".",
"get",
"(",
"allele_key",
")",
"if",
"seqalt_id",
"is",
"None",
":",
"seqalt_id",
"=",
"allele_id",
"geno",
".",
"addSequenceDerivesFrom",
"(",
"seqalt_id",
",",
"gene_id",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"filereader",
".",
"line_num",
">",
"limit",
":",
"break",
"return"
] |
Here, we have the relationship between MGI transgene alleles,
and the non-mouse gene ids that are part of them.
We augment the allele with the transgene parts.
:param limit:
:return:
|
[
"Here",
"we",
"have",
"the",
"relationship",
"between",
"MGI",
"transgene",
"alleles",
"and",
"the",
"non",
"-",
"mouse",
"gene",
"ids",
"that",
"are",
"part",
"of",
"them",
".",
"We",
"augment",
"the",
"allele",
"with",
"the",
"transgene",
"parts",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L1891-L1944
|
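process_mgi_relationship_transgene_genes reads its TSV by declaring the expected header as a list and then indexing each row with col.index(name), which keeps the parser honest if columns move. A self-contained sketch of that pattern with an in-memory file and invented values (raising on a header mismatch where the source merely logs):

import csv
import io

# Hypothetical two-line TSV standing in for the raw dump file.
data = 'rel_key\tallele_key\tallele_id\tgene_num\n1\t42\tMGI:3510484\t7227\n'

col = ['rel_key', 'allele_key', 'allele_id', 'gene_num']
reader = csv.reader(io.StringIO(data), delimiter='\t', quotechar='"')
header = next(reader)
if header != col:
    raise ValueError('expected columns: %s but got %s' % (col, header))

for row in reader:
    # field lookup by name rather than by hard-coded position
    allele_key = int(row[col.index('allele_key')])
    gene_id = 'NCBIGene:' + row[col.index('gene_num')]
    print(allele_key, gene_id)
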
18,391
|
monarch-initiative/dipper
|
dipper/graph/RDFGraph.py
|
RDFGraph._getnode
|
def _getnode(self, curie): # convention is lowercase names
"""
This is a wrapper for creating a URIRef or Bnode object
with a given curie or iri as a string.
If an id starts with an underscore, it assigns it to a BNode, otherwise
it creates it with a standard URIRef.
Alternatively, if self.skolemize_blank_node is True,
it will skolemize the blank node
:param curie: str identifier formatted as curie or iri
:return: node: RDFLib URIRef or BNode object
"""
node = None
if curie[0] == '_':
if self.are_bnodes_skized is True:
node = self.skolemizeBlankNode(curie)
else: # delete the leading underscore to make it cleaner
node = BNode(re.sub(r'^_:|^_', '', curie, 1))
# Check if curie string is actually an IRI
elif curie[:4] == 'http' or curie[:3] == 'ftp':
node = URIRef(curie)
else:
iri = RDFGraph.curie_util.get_uri(curie)
if iri is not None:
node = URIRef(RDFGraph.curie_util.get_uri(curie))
# Bind prefix map to graph
prefix = curie.split(':')[0]
if prefix not in self.namespace_manager.namespaces():
mapped_iri = self.curie_map[prefix]
self.bind(prefix, Namespace(mapped_iri))
else:
LOG.error("couldn't make URI for %s", curie)
return node
|
python
|
def _getnode(self, curie): # convention is lowercase names
"""
This is a wrapper for creating a URIRef or Bnode object
with a given curie or iri as a string.
If an id starts with an underscore, it assigns it to a BNode, otherwise
it creates it with a standard URIRef.
Alternatively, if self.skolemize_blank_node is True,
it will skolemize the blank node
:param curie: str identifier formatted as curie or iri
:return: node: RDFLib URIRef or BNode object
"""
node = None
if curie[0] == '_':
if self.are_bnodes_skized is True:
node = self.skolemizeBlankNode(curie)
else: # delete the leading underscore to make it cleaner
node = BNode(re.sub(r'^_:|^_', '', curie, 1))
# Check if curie string is actually an IRI
elif curie[:4] == 'http' or curie[:3] == 'ftp':
node = URIRef(curie)
else:
iri = RDFGraph.curie_util.get_uri(curie)
if iri is not None:
node = URIRef(RDFGraph.curie_util.get_uri(curie))
# Bind prefix map to graph
prefix = curie.split(':')[0]
if prefix not in self.namespace_manager.namespaces():
mapped_iri = self.curie_map[prefix]
self.bind(prefix, Namespace(mapped_iri))
else:
LOG.error("couldn't make URI for %s", curie)
return node
|
[
"def",
"_getnode",
"(",
"self",
",",
"curie",
")",
":",
"# convention is lowercase names",
"node",
"=",
"None",
"if",
"curie",
"[",
"0",
"]",
"==",
"'_'",
":",
"if",
"self",
".",
"are_bnodes_skized",
"is",
"True",
":",
"node",
"=",
"self",
".",
"skolemizeBlankNode",
"(",
"curie",
")",
"else",
":",
"# delete the leading underscore to make it cleaner",
"node",
"=",
"BNode",
"(",
"re",
".",
"sub",
"(",
"r'^_:|^_'",
",",
"''",
",",
"curie",
",",
"1",
")",
")",
"# Check if curie string is actually an IRI",
"elif",
"curie",
"[",
":",
"4",
"]",
"==",
"'http'",
"or",
"curie",
"[",
":",
"3",
"]",
"==",
"'ftp'",
":",
"node",
"=",
"URIRef",
"(",
"curie",
")",
"else",
":",
"iri",
"=",
"RDFGraph",
".",
"curie_util",
".",
"get_uri",
"(",
"curie",
")",
"if",
"iri",
"is",
"not",
"None",
":",
"node",
"=",
"URIRef",
"(",
"RDFGraph",
".",
"curie_util",
".",
"get_uri",
"(",
"curie",
")",
")",
"# Bind prefix map to graph",
"prefix",
"=",
"curie",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"if",
"prefix",
"not",
"in",
"self",
".",
"namespace_manager",
".",
"namespaces",
"(",
")",
":",
"mapped_iri",
"=",
"self",
".",
"curie_map",
"[",
"prefix",
"]",
"self",
".",
"bind",
"(",
"prefix",
",",
"Namespace",
"(",
"mapped_iri",
")",
")",
"else",
":",
"LOG",
".",
"error",
"(",
"\"couldn't make URI for %s\"",
",",
"curie",
")",
"return",
"node"
] |
This is a wrapper for creating a URIRef or Bnode object
with a given curie or iri as a string.
If an id starts with an underscore, it assigns it to a BNode, otherwise
it creates it with a standard URIRef.
Alternatively, if self.skolemize_blank_node is True,
it will skolemize the blank node
:param curie: str identifier formatted as curie or iri
:return: node: RDFLib URIRef or BNode object
|
[
"This",
"is",
"a",
"wrapper",
"for",
"creating",
"a",
"URIRef",
"or",
"Bnode",
"object",
"with",
"a",
"given",
"a",
"curie",
"or",
"iri",
"as",
"a",
"string",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/graph/RDFGraph.py#L92-L126
|
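_getnode's three-way dispatch (blank node on a leading underscore, pass-through for full IRIs, curie expansion otherwise) is easy to exercise without rdflib. A simplified stand-in that returns tagged tuples instead of URIRef/BNode objects; the prefix map here is a trimmed, hypothetical stand-in for the real curie map:

import re

curie_map = {
    'MGI': 'http://www.informatics.jax.org/accession/MGI:',
    'NCBIGene': 'http://www.ncbi.nlm.nih.gov/gene/',
}

def make_node(curie):
    # blank node: strip one leading '_:' or '_' from the identifier
    if curie.startswith('_'):
        return ('BNode', re.sub(r'^_:|^_', '', curie, count=1))
    # already a full IRI: pass it through untouched
    if curie[:4] == 'http' or curie[:3] == 'ftp':
        return ('URIRef', curie)
    # otherwise expand the curie via the prefix map
    prefix = curie.split(':')[0]
    if prefix in curie_map:
        return ('URIRef', curie_map[prefix] + curie.split(':', 1)[1])
    return None  # the source logs an error here

print(make_node('_:b0'))
print(make_node('NCBIGene:7227'))
print(make_node('http://example.org/x'))
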
18,392
|
monarch-initiative/dipper
|
dipper/models/assoc/D2PAssoc.py
|
D2PAssoc.add_association_to_graph
|
def add_association_to_graph(self):
"""
The reified relationship between a disease and a phenotype is decorated
with some provenance information.
This makes the assumption that both the disease and phenotype
are classes.
:param g:
:return:
"""
# add the basic association nodes
# if rel == self.globaltt[['has disposition']:
Assoc.add_association_to_graph(self)
# anticipating trouble with onsets ranges that look like curies
if self.onset is not None and self.onset != '':
self.graph.addTriple(self.assoc_id, self.globaltt['onset'], self.onset)
if self.frequency is not None and self.frequency != '':
self.graph.addTriple(
self.assoc_id, self.globaltt['frequency'], self.frequency)
return
|
python
|
def add_association_to_graph(self):
"""
The reified relationship between a disease and a phenotype is decorated
with some provenance information.
This makes the assumption that both the disease and phenotype
are classes.
:param g:
:return:
"""
# add the basic association nodes
# if rel == self.globaltt[['has disposition']:
Assoc.add_association_to_graph(self)
# anticipating trouble with onsets ranges that look like curies
if self.onset is not None and self.onset != '':
self.graph.addTriple(self.assoc_id, self.globaltt['onset'], self.onset)
if self.frequency is not None and self.frequency != '':
self.graph.addTriple(
self.assoc_id, self.globaltt['frequency'], self.frequency)
return
|
[
"def",
"add_association_to_graph",
"(",
"self",
")",
":",
"# add the basic association nodes",
"# if rel == self.globaltt[['has disposition']:",
"Assoc",
".",
"add_association_to_graph",
"(",
"self",
")",
"# anticipating trouble with onsets ranges that look like curies",
"if",
"self",
".",
"onset",
"is",
"not",
"None",
"and",
"self",
".",
"onset",
"!=",
"''",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"self",
".",
"assoc_id",
",",
"self",
".",
"globaltt",
"[",
"'onset'",
"]",
",",
"self",
".",
"onset",
")",
"if",
"self",
".",
"frequency",
"is",
"not",
"None",
"and",
"self",
".",
"frequency",
"!=",
"''",
":",
"self",
".",
"graph",
".",
"addTriple",
"(",
"self",
".",
"assoc_id",
",",
"self",
".",
"globaltt",
"[",
"'frequency'",
"]",
",",
"self",
".",
"frequency",
")",
"return"
] |
The reified relationship between a disease and a phenotype is decorated
with some provenance information.
This makes the assumption that both the disease and phenotype
are classes.
:param g:
:return:
|
[
"The",
"reified",
"relationship",
"between",
"a",
"disease",
"and",
"a",
"phenotype",
"is",
"decorated",
"with",
"some",
"provenance",
"information",
".",
"This",
"makes",
"the",
"assumption",
"that",
"both",
"the",
"disease",
"and",
"phenotype",
"are",
"classes",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/D2PAssoc.py#L50-L75
|
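The onset/frequency guards above treat both None and the empty string as "absent" before asserting the optional qualifiers. The same pattern, lifted out with a plain list standing in for the graph and all identifiers hypothetical:

def decorate_association(triples, assoc_id, onset=None, frequency=None):
    # only assert optional qualifiers that actually carry a value
    if onset is not None and onset != '':
        triples.append((assoc_id, 'onset', onset))
    if frequency is not None and frequency != '':
        triples.append((assoc_id, 'frequency', frequency))
    return triples

print(decorate_association([], ':assoc1', onset='HP:0003577', frequency=''))
# -> [(':assoc1', 'onset', 'HP:0003577')]  -- the empty frequency is skipped
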
18,393
|
monarch-initiative/dipper
|
dipper/sources/Monochrom.py
|
Monochrom.make_parent_bands
|
def make_parent_bands(self, band, child_bands):
"""
this will determine the grouping bands that it belongs to, recursively
13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31
:param band:
:param child_bands:
:return:
"""
m = re.match(r'([pq][A-H\d]+(?:\.\d+)?)', band)
if len(band) > 0:
if m:
p = str(band[0:len(band)-1])
p = re.sub(r'\.$', '', p)
if p is not None:
child_bands.add(p)
self.make_parent_bands(p, child_bands)
else:
child_bands = set()
return child_bands
|
python
|
def make_parent_bands(self, band, child_bands):
"""
this will determine the grouping bands that it belongs to, recursively
13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31
:param band:
:param child_bands:
:return:
"""
m = re.match(r'([pq][A-H\d]+(?:\.\d+)?)', band)
if len(band) > 0:
if m:
p = str(band[0:len(band)-1])
p = re.sub(r'\.$', '', p)
if p is not None:
child_bands.add(p)
self.make_parent_bands(p, child_bands)
else:
child_bands = set()
return child_bands
|
[
"def",
"make_parent_bands",
"(",
"self",
",",
"band",
",",
"child_bands",
")",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'([pq][A-H\\d]+(?:\\.\\d+)?)'",
",",
"band",
")",
"if",
"len",
"(",
"band",
")",
">",
"0",
":",
"if",
"m",
":",
"p",
"=",
"str",
"(",
"band",
"[",
"0",
":",
"len",
"(",
"band",
")",
"-",
"1",
"]",
")",
"p",
"=",
"re",
".",
"sub",
"(",
"r'\\.$'",
",",
"''",
",",
"p",
")",
"if",
"p",
"is",
"not",
"None",
":",
"child_bands",
".",
"add",
"(",
"p",
")",
"self",
".",
"make_parent_bands",
"(",
"p",
",",
"child_bands",
")",
"else",
":",
"child_bands",
"=",
"set",
"(",
")",
"return",
"child_bands"
] |
this will determine the grouping bands that it belongs to, recursively
13q21.31 ==> 13, 13q, 13q2, 13q21, 13q21.3, 13q21.31
:param band:
:param child_bands:
:return:
|
[
"this",
"will",
"determine",
"the",
"grouping",
"bands",
"that",
"it",
"belongs",
"to",
"recursively",
"13q21",
".",
"31",
"==",
">",
"13",
"13q",
"13q2",
"13q21",
"13q21",
".",
"3",
"13q21",
".",
"31"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Monochrom.py#L334-L354
|
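make_parent_bands is self-contained enough to run standalone: it peels one character off the end per recursion, trimming a trailing dot, and the regex stops it once only the arm letter remains. A sketch assuming, as the regex implies, that the band arrives without its chromosome prefix (the '13' in the docstring example is handled by the caller):

import re

def make_parent_bands(band, child_bands):
    m = re.match(r'([pq][A-H\d]+(?:\.\d+)?)', band)
    if band and m:
        # drop the last character, then any trailing '.' it exposes
        p = re.sub(r'\.$', '', band[:-1])
        if p:
            child_bands.add(p)
            make_parent_bands(p, child_bands)
    return child_bands

print(sorted(make_parent_bands('q21.31', set())))
# -> ['q', 'q2', 'q21', 'q21.3']
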
18,394
|
monarch-initiative/dipper
|
dipper/utils/CurieUtil.py
|
CurieUtil.get_curie
|
def get_curie(self, uri):
'''Get a CURIE from a URI '''
prefix = self.get_curie_prefix(uri)
if prefix is not None:
key = self.curie_map[prefix]
return '%s:%s' % (prefix, uri[len(key):len(uri)])
return None
|
python
|
def get_curie(self, uri):
'''Get a CURIE from a URI '''
prefix = self.get_curie_prefix(uri)
if prefix is not None:
key = self.curie_map[prefix]
return '%s:%s' % (prefix, uri[len(key):len(uri)])
return None
|
[
"def",
"get_curie",
"(",
"self",
",",
"uri",
")",
":",
"prefix",
"=",
"self",
".",
"get_curie_prefix",
"(",
"uri",
")",
"if",
"prefix",
"is",
"not",
"None",
":",
"key",
"=",
"self",
".",
"curie_map",
"[",
"prefix",
"]",
"return",
"'%s:%s'",
"%",
"(",
"prefix",
",",
"uri",
"[",
"len",
"(",
"key",
")",
":",
"len",
"(",
"uri",
")",
"]",
")",
"return",
"None"
] |
Get a CURIE from a URI
|
[
"Get",
"a",
"CURIE",
"from",
"a",
"URI"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/CurieUtil.py#L31-L37
|
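get_curie contracts a URI against the prefix map; the real method resolves the prefix through get_curie_prefix first, which this sketch inlines as a startswith scan over a trimmed, hypothetical map:

curie_map = {'NCBIGene': 'http://www.ncbi.nlm.nih.gov/gene/'}

def get_curie(uri):
    for prefix, base in curie_map.items():
        if uri.startswith(base):
            return '%s:%s' % (prefix, uri[len(base):])
    return None

print(get_curie('http://www.ncbi.nlm.nih.gov/gene/7227'))  # NCBIGene:7227
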
18,395
|
monarch-initiative/dipper
|
dipper/utils/CurieUtil.py
|
CurieUtil.get_uri
|
def get_uri(self, curie):
''' Get a URI from a CURIE '''
if curie is None:
return None
parts = curie.split(':')
if len(parts) == 1:
if curie != '':
LOG.error("Not a properly formed curie: \"%s\"", curie)
return None
prefix = parts[0]
if prefix in self.curie_map:
return '%s%s' % (self.curie_map.get(prefix),
curie[(curie.index(':') + 1):])
LOG.error("Curie prefix not defined for %s", curie)
return None
|
python
|
def get_uri(self, curie):
''' Get a URI from a CURIE '''
if curie is None:
return None
parts = curie.split(':')
if len(parts) == 1:
if curie != '':
LOG.error("Not a properly formed curie: \"%s\"", curie)
return None
prefix = parts[0]
if prefix in self.curie_map:
return '%s%s' % (self.curie_map.get(prefix),
curie[(curie.index(':') + 1):])
LOG.error("Curie prefix not defined for %s", curie)
return None
|
[
"def",
"get_uri",
"(",
"self",
",",
"curie",
")",
":",
"if",
"curie",
"is",
"None",
":",
"return",
"None",
"parts",
"=",
"curie",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"if",
"curie",
"!=",
"''",
":",
"LOG",
".",
"error",
"(",
"\"Not a properly formed curie: \\\"%s\\\"\"",
",",
"curie",
")",
"return",
"None",
"prefix",
"=",
"parts",
"[",
"0",
"]",
"if",
"prefix",
"in",
"self",
".",
"curie_map",
":",
"return",
"'%s%s'",
"%",
"(",
"self",
".",
"curie_map",
".",
"get",
"(",
"prefix",
")",
",",
"curie",
"[",
"(",
"curie",
".",
"index",
"(",
"':'",
")",
"+",
"1",
")",
":",
"]",
")",
"LOG",
".",
"error",
"(",
"\"Curie prefix not defined for %s\"",
",",
"curie",
")",
"return",
"None"
] |
Get a URI from a CURIE
|
[
"Get",
"a",
"URI",
"from",
"a",
"CURIE"
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/CurieUtil.py#L46-L60
|
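get_uri is the inverse expansion: reject inputs with no colon, split off the prefix, and concatenate the mapped base with everything after the first colon. The same logic with the logging stripped and the same trimmed, hypothetical map as above:

curie_map = {'NCBIGene': 'http://www.ncbi.nlm.nih.gov/gene/'}

def get_uri(curie):
    if curie is None:
        return None
    parts = curie.split(':')
    if len(parts) == 1:  # no colon at all: not a well-formed curie
        return None
    prefix = parts[0]
    if prefix in curie_map:
        return curie_map[prefix] + curie[curie.index(':') + 1:]
    return None

print(get_uri('NCBIGene:7227'))  # http://www.ncbi.nlm.nih.gov/gene/7227
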
18,396
|
monarch-initiative/dipper
|
dipper/sources/Coriell.py
|
Coriell.fetch
|
def fetch(self, is_dl_forced=False):
"""
Here we connect to the coriell sftp server using private connection
details. They dump bi-weekly files with a timestamp in the filename.
For each catalog, we ping the remote site and pull the most-recently
updated file, renaming it to our local latest.csv.
Be sure to have pg user/password connection details in your conf.yaml
file, like:
dbauth : {"coriell" : {
"user" : "<username>", "password" : "<password>",
"host" : <host>, "private_key"=path/to/rsa_key}
}
:param is_dl_forced:
:return:
"""
host = config.get_config()['dbauth']['coriell']['host']
key = config.get_config()['dbauth']['coriell']['private_key']
user = config.get_config()['user']['coriell']
passwd = config.get_config()['keys'][user]
with pysftp.Connection(
host, username=user, password=passwd, private_key=key) as sftp:
# check to make sure each file is in there
# get the remote files
remote_files = sftp.listdir_attr()
files_by_repo = {}
for attr in remote_files:
# for each catalog, get the most-recent filename
mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename)
if mch is not None and len(mch.groups()) > 0:
# there should just be one now
files_by_repo[mch.group(1)] = attr
# sort each array in hash,
# & get the name and time of the most-recent file for each catalog
for rmt in self.files:
LOG.info("Checking on %s catalog file", rmt)
fname = self.files[rmt]['file']
remotef = files_by_repo[rmt]
target_name = '/'.join((self.rawdir, fname))
# check if the local file is out of date, if so, download.
# otherwise, skip.
# we rename (for simplicity) the original file
fstat = None
if os.path.exists(target_name):
fstat = os.stat(target_name)
LOG.info(
"Local file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]:
if fstat is None:
LOG.info("File does not exist locally; downloading...")
else:
LOG.info(
"New version of %s catalog available; downloading...", rmt)
sftp.get(remotef.filename, target_name)
LOG.info(
"Fetched remote %s -> %s", remotef.filename, target_name)
fstat = os.stat(target_name)
filedate = datetime.utcfromtimestamp(
remotef.st_mtime).strftime("%Y-%m-%d")
LOG.info(
"New file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
else:
LOG.info("File %s exists; using local copy", fname)
filedate = datetime.utcfromtimestamp(
fstat[stat.ST_CTIME]).strftime("%Y-%m-%d")
self.dataset.setFileAccessUrl(remotef.filename, True)
self.dataset.setVersion(filedate)
return
|
python
|
def fetch(self, is_dl_forced=False):
"""
Here we connect to the coriell sftp server using private connection
details. They dump bi-weekly files with a timestamp in the filename.
For each catalog, we ping the remote site and pull the most-recently
updated file, renaming it to our local latest.csv.
Be sure to have pg user/password connection details in your conf.yaml
file, like:
dbauth : {"coriell" : {
"user" : "<username>", "password" : "<password>",
"host" : <host>, "private_key"=path/to/rsa_key}
}
:param is_dl_forced:
:return:
"""
host = config.get_config()['dbauth']['coriell']['host']
key = config.get_config()['dbauth']['coriell']['private_key']
user = config.get_config()['user']['coriell']
passwd = config.get_config()['keys'][user]
with pysftp.Connection(
host, username=user, password=passwd, private_key=key) as sftp:
# check to make sure each file is in there
# get the remote files
remote_files = sftp.listdir_attr()
files_by_repo = {}
for attr in remote_files:
# for each catalog, get the most-recent filename
mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename)
if mch is not None and len(mch.groups()) > 0:
# there should just be one now
files_by_repo[mch.group(1)] = attr
# sort each array in hash,
# & get the name and time of the most-recent file for each catalog
for rmt in self.files:
LOG.info("Checking on %s catalog file", rmt)
fname = self.files[rmt]['file']
remotef = files_by_repo[rmt]
target_name = '/'.join((self.rawdir, fname))
# check if the local file is out of date, if so, download.
# otherwise, skip.
# we rename (for simplicity) the original file
fstat = None
if os.path.exists(target_name):
fstat = os.stat(target_name)
LOG.info(
"Local file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]:
if fstat is None:
LOG.info("File does not exist locally; downloading...")
else:
LOG.info(
"New version of %s catalog available; downloading...", rmt)
sftp.get(remotef.filename, target_name)
LOG.info(
"Fetched remote %s -> %s", remotef.filename, target_name)
fstat = os.stat(target_name)
filedate = datetime.utcfromtimestamp(
remotef.st_mtime).strftime("%Y-%m-%d")
LOG.info(
"New file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
else:
LOG.info("File %s exists; using local copy", fname)
filedate = datetime.utcfromtimestamp(
fstat[stat.ST_CTIME]).strftime("%Y-%m-%d")
self.dataset.setFileAccessUrl(remotef.filename, True)
self.dataset.setVersion(filedate)
return
|
[
"def",
"fetch",
"(",
"self",
",",
"is_dl_forced",
"=",
"False",
")",
":",
"host",
"=",
"config",
".",
"get_config",
"(",
")",
"[",
"'dbauth'",
"]",
"[",
"'coriell'",
"]",
"[",
"'host'",
"]",
"key",
"=",
"config",
".",
"get_config",
"(",
")",
"[",
"'dbauth'",
"]",
"[",
"'coriell'",
"]",
"[",
"'private_key'",
"]",
"user",
"=",
"config",
".",
"get_config",
"(",
")",
"[",
"'user'",
"]",
"[",
"'coriell'",
"]",
"passwd",
"=",
"config",
".",
"get_config",
"(",
")",
"[",
"'keys'",
"]",
"[",
"user",
"]",
"with",
"pysftp",
".",
"Connection",
"(",
"host",
",",
"username",
"=",
"user",
",",
"password",
"=",
"passwd",
",",
"private_key",
"=",
"key",
")",
"as",
"sftp",
":",
"# check to make sure each file is in there",
"# get the remote files",
"remote_files",
"=",
"sftp",
".",
"listdir_attr",
"(",
")",
"files_by_repo",
"=",
"{",
"}",
"for",
"attr",
"in",
"remote_files",
":",
"# for each catalog, get the most-recent filename",
"mch",
"=",
"re",
".",
"match",
"(",
"'(NIGMS|NIA|NHGRI|NINDS)'",
",",
"attr",
".",
"filename",
")",
"if",
"mch",
"is",
"not",
"None",
"and",
"len",
"(",
"mch",
".",
"groups",
"(",
")",
")",
">",
"0",
":",
"# there should just be one now",
"files_by_repo",
"[",
"mch",
".",
"group",
"(",
"1",
")",
"]",
"=",
"attr",
"# sort each array in hash,",
"# & get the name and time of the most-recent file for each catalog",
"for",
"rmt",
"in",
"self",
".",
"files",
":",
"LOG",
".",
"info",
"(",
"\"Checking on %s catalog file\"",
",",
"rmt",
")",
"fname",
"=",
"self",
".",
"files",
"[",
"rmt",
"]",
"[",
"'file'",
"]",
"remotef",
"=",
"files_by_repo",
"[",
"rmt",
"]",
"target_name",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"fname",
")",
")",
"# check if the local file is out of date, if so, download.",
"# otherwise, skip.",
"# we rename (for simplicity) the original file",
"fstat",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"target_name",
")",
":",
"fstat",
"=",
"os",
".",
"stat",
"(",
"target_name",
")",
"LOG",
".",
"info",
"(",
"\"Local file date: %s\"",
",",
"datetime",
".",
"utcfromtimestamp",
"(",
"fstat",
"[",
"stat",
".",
"ST_CTIME",
"]",
")",
")",
"if",
"fstat",
"is",
"None",
"or",
"remotef",
".",
"st_mtime",
">",
"fstat",
"[",
"stat",
".",
"ST_CTIME",
"]",
":",
"if",
"fstat",
"is",
"None",
":",
"LOG",
".",
"info",
"(",
"\"File does not exist locally; downloading...\"",
")",
"else",
":",
"LOG",
".",
"info",
"(",
"\"New version of %s catalog available; downloading...\"",
",",
"rmt",
")",
"sftp",
".",
"get",
"(",
"remotef",
".",
"filename",
",",
"target_name",
")",
"LOG",
".",
"info",
"(",
"\"Fetched remote %s -> %s\"",
",",
"remotef",
".",
"filename",
",",
"target_name",
")",
"fstat",
"=",
"os",
".",
"stat",
"(",
"target_name",
")",
"filedate",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"remotef",
".",
"st_mtime",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"LOG",
".",
"info",
"(",
"\"New file date: %s\"",
",",
"datetime",
".",
"utcfromtimestamp",
"(",
"fstat",
"[",
"stat",
".",
"ST_CTIME",
"]",
")",
")",
"else",
":",
"LOG",
".",
"info",
"(",
"\"File %s exists; using local copy\"",
",",
"fname",
")",
"filedate",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"fstat",
"[",
"stat",
".",
"ST_CTIME",
"]",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"self",
".",
"dataset",
".",
"setFileAccessUrl",
"(",
"remotef",
".",
"filename",
",",
"True",
")",
"self",
".",
"dataset",
".",
"setVersion",
"(",
"filedate",
")",
"return"
] |
Here we connect to the coriell sftp server using private connection
details. They dump bi-weekly files with a timestamp in the filename.
For each catalog, we ping the remote site and pull the most-recently
updated file, renaming it to our local latest.csv.
Be sure to have pg user/password connection details in your conf.yaml
file, like:
dbauth : {"coriell" : {
"user" : "<username>", "password" : "<password>",
"host" : <host>, "private_key"=path/to/rsa_key}
}
:param is_dl_forced:
:return:
|
[
"Here",
"we",
"connect",
"to",
"the",
"coriell",
"sftp",
"server",
"using",
"private",
"connection",
"details",
".",
"They",
"dump",
"bi",
"-",
"weekly",
"files",
"with",
"a",
"timestamp",
"in",
"the",
"filename",
".",
"For",
"each",
"catalog",
"we",
"ping",
"the",
"remote",
"site",
"and",
"pull",
"the",
"most",
"-",
"recently",
"updated",
"file",
"renaming",
"it",
"to",
"our",
"local",
"latest",
".",
"csv",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Coriell.py#L150-L224
|
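The interesting kernel of fetch() is the staleness test: download when no local copy exists, or when the remote mtime is newer than the local ctime. That test can be factored out and run without an sftp connection; the path and the "remote" timestamp below are hypothetical:

import os
import stat
from datetime import datetime, timezone

def needs_download(local_path, remote_mtime):
    # no local copy, or remote is newer than what we have on disk
    if not os.path.exists(local_path):
        return True
    fstat = os.stat(local_path)
    return remote_mtime > fstat[stat.ST_CTIME]

# pretend the remote file was touched just now
now = datetime.now(timezone.utc).timestamp()
print(needs_download('/tmp/latest.csv', now))
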
18,397
|
monarch-initiative/dipper
|
dipper/sources/Coriell.py
|
Coriell._process_collection
|
def _process_collection(self, collection_id, label, page):
"""
This function will process the data supplied internally
about the repository from Coriell.
Triples:
Repository a ERO:collection
rdf:label Literal(label)
foaf:page Literal(page)
:param collection_id:
:param label:
:param page:
:return:
"""
# ############# BUILD THE CELL LINE REPOSITORY #############
for graph in [self.graph, self.testgraph]:
# TODO: How to devise a label for each repository?
model = Model(graph)
reference = Reference(graph)
repo_id = 'CoriellCollection:' + collection_id
repo_label = label
repo_page = page
model.addIndividualToGraph(
repo_id, repo_label, self.globaltt['collection'])
reference.addPage(repo_id, repo_page)
return
|
python
|
def _process_collection(self, collection_id, label, page):
"""
This function will process the data supplied internally
about the repository from Coriell.
Triples:
Repository a ERO:collection
rdf:label Literal(label)
foaf:page Literal(page)
:param collection_id:
:param label:
:param page:
:return:
"""
# ############# BUILD THE CELL LINE REPOSITORY #############
for graph in [self.graph, self.testgraph]:
# TODO: How to devise a label for each repository?
model = Model(graph)
reference = Reference(graph)
repo_id = 'CoriellCollection:' + collection_id
repo_label = label
repo_page = page
model.addIndividualToGraph(
repo_id, repo_label, self.globaltt['collection'])
reference.addPage(repo_id, repo_page)
return
|
[
"def",
"_process_collection",
"(",
"self",
",",
"collection_id",
",",
"label",
",",
"page",
")",
":",
"# ############# BUILD THE CELL LINE REPOSITORY #############",
"for",
"graph",
"in",
"[",
"self",
".",
"graph",
",",
"self",
".",
"testgraph",
"]",
":",
"# TODO: How to devise a label for each repository?",
"model",
"=",
"Model",
"(",
"graph",
")",
"reference",
"=",
"Reference",
"(",
"graph",
")",
"repo_id",
"=",
"'CoriellCollection:'",
"+",
"collection_id",
"repo_label",
"=",
"label",
"repo_page",
"=",
"page",
"model",
".",
"addIndividualToGraph",
"(",
"repo_id",
",",
"repo_label",
",",
"self",
".",
"globaltt",
"[",
"'collection'",
"]",
")",
"reference",
".",
"addPage",
"(",
"repo_id",
",",
"repo_page",
")",
"return"
] |
This function will process the data supplied internally
about the repository from Coriell.
Triples:
Repository a ERO:collection
rdf:label Literal(label)
foaf:page Literal(page)
:param collection_id:
:param label:
:param page:
:return:
|
[
"This",
"function",
"will",
"process",
"the",
"data",
"supplied",
"internally",
"about",
"the",
"repository",
"from",
"Coriell",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Coriell.py#L760-L788
|
18,398
|
monarch-initiative/dipper
|
dipper/sources/FlyBase.py
|
FlyBase._process_genotypes
|
def _process_genotypes(self, limit):
"""
Add the genotype internal id to flybase mapping to the idhashmap.
Also, add them as individuals to the graph.
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'genotype'))
LOG.info("building labels for genotypes")
geno = Genotype(graph)
fly_tax = self.globaltt['Drosophila melanogaster']
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
line_counter += 1
(genotype_num, uniquename, description, name) = line
# if self.test_mode is True:
# if int(object_key) not in self.test_keys.get('genotype'):
# continue
# add the internal genotype to pub mapping
genotype_id = 'MONARCH:FBgeno'+str(genotype_num)
self.idhash['genotype'][genotype_num] = genotype_id
if description == '':
description = None
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
if self.test_mode and int(genotype_num) \
not in self.test_keys['genotype']:
continue
model.addIndividualToGraph(
genotype_id, uniquename,
self.globaltt['intrinsic_genotype'],
description)
# we know all genotypes are in flies
# FIXME we assume here they are in melanogaster,
# but that isn't necessarily true!!!
# TODO should the taxon be == genomic background?
geno.addTaxon(fly_tax, genotype_id)
genotype_iid = self._makeInternalIdentifier(
'genotype', genotype_num)
model.addComment(
genotype_id, genotype_iid)
if name.strip() != '':
model.addSynonym(genotype_id, name)
return
|
python
|
def _process_genotypes(self, limit):
"""
Add the genotype internal id to flybase mapping to the idhashmap.
Also, add them as individuals to the graph.
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'genotype'))
LOG.info("building labels for genotypes")
geno = Genotype(graph)
fly_tax = self.globaltt['Drosophila melanogaster']
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
line_counter += 1
(genotype_num, uniquename, description, name) = line
# if self.test_mode is True:
# if int(object_key) not in self.test_keys.get('genotype'):
# continue
# add the internal genotype to pub mapping
genotype_id = 'MONARCH:FBgeno'+str(genotype_num)
self.idhash['genotype'][genotype_num] = genotype_id
if description == '':
description = None
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
if self.test_mode and int(genotype_num) \
not in self.test_keys['genotype']:
continue
model.addIndividualToGraph(
genotype_id, uniquename,
self.globaltt['intrinsic_genotype'],
description)
# we know all genotypes are in flies
# FIXME we assume here they are in melanogaster,
# but that isn't necessarily true!!!
# TODO should the taxon be == genomic background?
geno.addTaxon(fly_tax, genotype_id)
genotype_iid = self._makeInternalIdentifier(
'genotype', genotype_num)
model.addComment(
genotype_id, genotype_iid)
if name.strip() != '':
model.addSynonym(genotype_id, name)
return
|
[
"def",
"_process_genotypes",
"(",
"self",
",",
"limit",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'genotype'",
")",
")",
"LOG",
".",
"info",
"(",
"\"building labels for genotypes\"",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"fly_tax",
"=",
"self",
".",
"globaltt",
"[",
"'Drosophila melanogaster'",
"]",
"with",
"open",
"(",
"raw",
",",
"'r'",
")",
"as",
"f",
":",
"f",
".",
"readline",
"(",
")",
"# read the header row; skip",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"line",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"genotype_num",
",",
"uniquename",
",",
"description",
",",
"name",
")",
"=",
"line",
"# if self.test_mode is True:",
"# if int(object_key) not in self.test_keys.get('genotype'):",
"# continue",
"# add the internal genotype to pub mapping",
"genotype_id",
"=",
"'MONARCH:FBgeno'",
"+",
"str",
"(",
"genotype_num",
")",
"self",
".",
"idhash",
"[",
"'genotype'",
"]",
"[",
"genotype_num",
"]",
"=",
"genotype_id",
"if",
"description",
"==",
"''",
":",
"description",
"=",
"None",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"pass",
"else",
":",
"if",
"self",
".",
"test_mode",
"and",
"int",
"(",
"genotype_num",
")",
"not",
"in",
"self",
".",
"test_keys",
"[",
"'genotype'",
"]",
":",
"continue",
"model",
".",
"addIndividualToGraph",
"(",
"genotype_id",
",",
"uniquename",
",",
"self",
".",
"globaltt",
"[",
"'intrinsic_genotype'",
"]",
",",
"description",
")",
"# we know all genotypes are in flies",
"# FIXME we assume here they are in melanogaster,",
"# but that isn't necessarily true!!!",
"# TODO should the taxon be == genomic background?",
"geno",
".",
"addTaxon",
"(",
"fly_tax",
",",
"genotype_id",
")",
"genotype_iid",
"=",
"self",
".",
"_makeInternalIdentifier",
"(",
"'genotype'",
",",
"genotype_num",
")",
"model",
".",
"addComment",
"(",
"genotype_id",
",",
"genotype_iid",
")",
"if",
"name",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"model",
".",
"addSynonym",
"(",
"genotype_id",
",",
"name",
")",
"return"
] |
Add the genotype internal id to flybase mapping to the idhashmap.
Also, add them as individuals to the graph.
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
|
[
"Add",
"the",
"genotype",
"internal",
"id",
"to",
"flybase",
"mapping",
"to",
"the",
"idhashmap",
".",
"Also",
"add",
"them",
"as",
"individuals",
"to",
"the",
"graph",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L358-L423
|
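Two small conventions in _process_genotypes are worth isolating: minting a MONARCH-namespaced id from the internal key and recording it in the idhash for later passes, and normalizing an empty description to None. A minimal sketch with a hypothetical key:

idhash = {'genotype': {}}

def mint_genotype_id(genotype_num, description=''):
    genotype_id = 'MONARCH:FBgeno' + str(genotype_num)
    # remember internal key -> curie so later passes can resolve it
    idhash['genotype'][genotype_num] = genotype_id
    # empty strings become None, matching the source's description handling
    return genotype_id, (description or None)

print(mint_genotype_id('12345'))  # ('MONARCH:FBgeno12345', None)
print(idhash['genotype'])
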
18,399
|
monarch-initiative/dipper
|
dipper/sources/FlyBase.py
|
FlyBase._process_stocks
|
def _process_stocks(self, limit):
"""
Stock definitions.
Here we instantiate them as instances of the given taxon.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'stock'))
LOG.info("building labels for stocks")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
line_counter += 1
(stock_id, dbxref_id, organism_id, name, uniquename,
description, type_id, is_obsolete) = line
# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670
stock_num = stock_id
stock_id = 'FlyBase:'+uniquename
self.idhash['stock'][stock_num] = stock_id
stock_label = description
organism_key = organism_id
taxon = self.idhash['organism'][organism_key]
# from what i can tell, the dbxrefs are just more FBst,
# so no added information vs uniquename
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
if self.test_mode \
and int(stock_num) not in self.test_keys['strain']:
continue
# tax_label = self.label_hash[taxon] # unused
# add the tax in case it hasn't been already
model.addClassToGraph(taxon)
model.addIndividualToGraph(stock_id, stock_label, taxon)
if is_obsolete == 't':
model.addDeprecatedIndividual(stock_id)
return
|
python
|
def _process_stocks(self, limit):
"""
Stock definitions.
Here we instantiate them as instances of the given taxon.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'stock'))
LOG.info("building labels for stocks")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
line_counter += 1
(stock_id, dbxref_id, organism_id, name, uniquename,
description, type_id, is_obsolete) = line
# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670
stock_num = stock_id
stock_id = 'FlyBase:'+uniquename
self.idhash['stock'][stock_num] = stock_id
stock_label = description
organism_key = organism_id
taxon = self.idhash['organism'][organism_key]
# from what i can tell, the dbxrefs are just more FBst,
# so no added information vs uniquename
if not self.test_mode and limit is not None and line_counter > limit:
pass
else:
if self.test_mode \
and int(stock_num) not in self.test_keys['strain']:
continue
# tax_label = self.label_hash[taxon] # unused
# add the tax in case it hasn't been already
model.addClassToGraph(taxon)
model.addIndividualToGraph(stock_id, stock_label, taxon)
if is_obsolete == 't':
model.addDeprecatedIndividual(stock_id)
return
|
[
"def",
"_process_stocks",
"(",
"self",
",",
"limit",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'stock'",
")",
")",
"LOG",
".",
"info",
"(",
"\"building labels for stocks\"",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
")",
"as",
"f",
":",
"f",
".",
"readline",
"(",
")",
"# read the header row; skip",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"line",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"stock_id",
",",
"dbxref_id",
",",
"organism_id",
",",
"name",
",",
"uniquename",
",",
"description",
",",
"type_id",
",",
"is_obsolete",
")",
"=",
"line",
"# 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670",
"stock_num",
"=",
"stock_id",
"stock_id",
"=",
"'FlyBase:'",
"+",
"uniquename",
"self",
".",
"idhash",
"[",
"'stock'",
"]",
"[",
"stock_num",
"]",
"=",
"stock_id",
"stock_label",
"=",
"description",
"organism_key",
"=",
"organism_id",
"taxon",
"=",
"self",
".",
"idhash",
"[",
"'organism'",
"]",
"[",
"organism_key",
"]",
"# from what i can tell, the dbxrefs are just more FBst,",
"# so no added information vs uniquename",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"pass",
"else",
":",
"if",
"self",
".",
"test_mode",
"and",
"int",
"(",
"stock_num",
")",
"not",
"in",
"self",
".",
"test_keys",
"[",
"'strain'",
"]",
":",
"continue",
"# tax_label = self.label_hash[taxon] # unused",
"# add the tax in case it hasn't been already",
"model",
".",
"addClassToGraph",
"(",
"taxon",
")",
"model",
".",
"addIndividualToGraph",
"(",
"stock_id",
",",
"stock_label",
",",
"taxon",
")",
"if",
"is_obsolete",
"==",
"'t'",
":",
"model",
".",
"addDeprecatedIndividual",
"(",
"stock_id",
")",
"return"
] |
Stock definitions.
Here we instantiate them as instances of the given taxon.
:param limit:
:return:
|
[
"Stock",
"definitions",
".",
"Here",
"we",
"instantiate",
"them",
"as",
"instances",
"of",
"the",
"given",
"taxon",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L426-L480
|