repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
dcartman/pygame-menu | pygame_menu/examples/other/widget_positioning.py | """
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - WIDGET POSITIONING
Test widget positioning example.
"""
import pygame_menu
from pygame_menu.examples import create_example_window

# Create the surface
surface = create_example_window('Example - Widget Positioning', (640, 480))

# Create a custom theme
my_theme = pygame_menu.themes.THEME_DARK.copy()
my_theme.title = False  # Hide the menu title

menu = pygame_menu.Menu(
    height=480,  # Use full-screen
    theme=my_theme,
    title='',
    center_content=False,
    width=640
)

# Floating title label, nudged down from the top edge
menu.add.label(
    'My App',
    background_color='#333',
    background_inflate=(30, 0),
    float=True  # Widget does not add size to the menu
).translate(0, 10)

# Rotated floating label
label = menu.add.label(
    'Lorem ipsum',
    float=True,
    font_name=pygame_menu.font.FONT_OPEN_SANS_ITALIC,
    font_size=25)
label.rotate(90)
label.translate(300, 160)

# Button options
b1 = menu.add.button(
    'Main Menu',
    lambda: print('My method'),  # fixed: was an f-string with no placeholders (F541)
    align=pygame_menu.locals.ALIGN_LEFT,
    float=True,
    selection_color='#fff'
)
b1.translate(10, 170)

b2 = menu.add.button(
    'Exit',
    pygame_menu.events.EXIT,
    align=pygame_menu.locals.ALIGN_LEFT,
    float=True,
    selection_color='#fff'
)
b2.translate(10, 235)

# Bottom scrollable text: a vertical frame packed with short labels
f = menu.add.frame_v(
    background_color='#6b6e5e',
    border_color='#36372f',
    border_width=1,
    float=True,
    height=480,
    max_height=100,  # Frame scrolls because content height exceeds max_height
    width=200
)
f.translate(220, 390)
labels = [menu.add.label(f' Lorem ipsum #{i}', font_size=15, font_color='#000000', padding=0) for i in range(20)]
for j in labels:
    f.pack(j)

if __name__ == '__main__':
    menu.mainloop(surface)
|
dcartman/pygame-menu | pygame_menu/controls.py | """
pygame-menu
https://github.com/ppizarror/pygame-menu
CONTROLS
Default controls of Menu object and key definition.
"""
__all__ = [

    # Joy pad
    'JOY_AXIS_X',
    'JOY_AXIS_Y',
    'JOY_BUTTON_BACK',
    'JOY_BUTTON_SELECT',
    'JOY_DEADZONE',
    'JOY_DELAY',
    'JOY_DOWN',
    'JOY_LEFT',
    'JOY_REPEAT',
    'JOY_RIGHT',
    'JOY_UP',

    # Keyboard events
    'KEY_APPLY',
    'KEY_BACK',
    'KEY_CLOSE_MENU',
    'KEY_LEFT',
    'KEY_MOVE_DOWN',
    'KEY_MOVE_UP',
    'KEY_RIGHT',
    'KEY_TAB'  # fixed: KEY_TAB was defined below but missing from __all__

]

# Imports
import pygame.locals as __locals

# Joy pad
JOY_AXIS_X = 0
JOY_AXIS_Y = 1
JOY_BUTTON_BACK = 1
JOY_BUTTON_SELECT = 0
JOY_DEADZONE = 0.5  # Minimum axis magnitude before movement registers
JOY_DELAY = 300  # ms, delay before the first repeated event
JOY_DOWN = (0, -1)
JOY_LEFT = (-1, 0)
JOY_REPEAT = 100  # ms, interval between repeated events
JOY_RIGHT = (1, 0)
JOY_UP = (0, 1)

# Keyboard events
KEY_APPLY = __locals.K_RETURN
KEY_BACK = __locals.K_BACKSPACE
KEY_CLOSE_MENU = __locals.K_ESCAPE
KEY_LEFT = __locals.K_LEFT
KEY_MOVE_DOWN = __locals.K_UP
KEY_MOVE_UP = __locals.K_DOWN  # Consider keys are "inverted"
KEY_RIGHT = __locals.K_RIGHT
KEY_TAB = __locals.K_TAB
|
dcartman/pygame-menu | pygame_menu/_widgetmanager.py | """
pygame-menu
https://github.com/ppizarror/pygame-menu
MENU WIDGET MANAGER
Easy widget add/remove to Menus.
"""
__all__ = ['WidgetManager']
import pygame_menu
from pygame_menu._base import Base
from pygame_menu.font import assert_font
from pygame_menu.utils import assert_vector, assert_color, assert_cursor, \
assert_position_vector, warn
# Import widgets
from pygame_menu.widgets.core.widget import Widget, check_widget_mouseleave
from pygame_menu.widgets.widget.button import ButtonManager
from pygame_menu.widgets.widget.colorinput import ColorInputManager
from pygame_menu.widgets.widget.dropselect import DropSelectManager
from pygame_menu.widgets.widget.dropselect_multiple import DropSelectMultipleManager
from pygame_menu.widgets.widget.frame import FrameManager
from pygame_menu.widgets.widget.hmargin import HMarginManager
from pygame_menu.widgets.widget.image import ImageManager
from pygame_menu.widgets.widget.label import LabelManager
from pygame_menu.widgets.widget.menulink import MenuLinkManager
from pygame_menu.widgets.widget.none import NoneWidgetManager
from pygame_menu.widgets.widget.progressbar import ProgressBarManager
from pygame_menu.widgets.widget.rangeslider import RangeSliderManager
from pygame_menu.widgets.widget.selector import SelectorManager
from pygame_menu.widgets.widget.surface import SurfaceWidgetManager
from pygame_menu.widgets.widget.table import TableManager
from pygame_menu.widgets.widget.textinput import TextInputManager
from pygame_menu.widgets.widget.toggleswitch import ToggleSwitchManager
from pygame_menu.widgets.widget.vmargin import VMarginManager
from pygame_menu._types import Any, Dict, PaddingInstance
# noinspection PyProtectedMember
class WidgetManager(
    Base,
    ButtonManager,
    ColorInputManager,
    DropSelectManager,
    DropSelectMultipleManager,
    FrameManager,
    HMarginManager,
    ImageManager,
    LabelManager,
    MenuLinkManager,
    NoneWidgetManager,
    ProgressBarManager,
    RangeSliderManager,
    SelectorManager,
    SurfaceWidgetManager,
    TableManager,
    TextInputManager,
    ToggleSwitchManager,
    VMarginManager
):
    """
    Add/Remove widgets to the Menu.

    Each mixin manager contributes one widget factory (``add.button``,
    ``add.label``, ...); this class supplies the shared plumbing they use:
    attribute filtering, configuration, and appending to the Menu.

    :param menu: Menu reference
    """

    def __init__(self, menu: 'pygame_menu.Menu') -> None:
        super(WidgetManager, self).__init__(object_id=menu.get_id() + '+widget-manager')
        self._menu = menu

    @property
    def _theme(self) -> 'pygame_menu.Theme':
        """Theme of the attached Menu (shortcut used by the widget factories)."""
        return self._menu.get_theme()

    def _add_submenu(self, menu: 'pygame_menu.Menu', hook: 'Widget') -> None:
        """
        Register ``menu`` as a submenu of the managed Menu, hooked by ``hook``.

        :param menu: Submenu to register
        :param hook: Widget that opens the submenu
        """
        assert isinstance(menu, pygame_menu.Menu)
        assert menu != self._menu, 'submenu cannot point to menu itself'
        assert isinstance(hook, Widget)
        if menu not in self._menu._submenus.keys():
            self._menu._submenus[menu] = []
        # A widget may hook a given submenu at most once
        assert hook not in self._menu._submenus[menu], \
            f'widget {hook.get_class_id()} already hooks submenu {menu.get_class_id()}'
        self._menu._submenus[menu].append(hook)
        hook._menu_hook = menu

    def _filter_widget_attributes(self, kwargs: Dict) -> Dict[str, Any]:
        """
        Pop the common widget attributes from ``kwargs`` (falling back to the
        Menu theme defaults), validate them, and return them as a dict.

        Consumed keys are removed from ``kwargs``, so any key left afterwards
        is unknown and can be rejected by ``_check_kwargs``.

        :param kwargs: Keyword arguments passed to the widget factory
        :return: Validated attribute dict
        """
        attributes = {}

        # align
        align = kwargs.pop('align', self._theme.widget_alignment)
        assert isinstance(align, str)
        attributes['align'] = align

        # background_color
        background_is_color = False
        background_color = kwargs.pop('background_color', self._theme.widget_background_color)
        if background_color is not None:
            if isinstance(background_color, pygame_menu.BaseImage):
                pass
            else:
                background_color = assert_color(background_color)
                background_is_color = True
        attributes['background_color'] = background_color

        # background_inflate (scalar 0 is normalized to the (0, 0) vector)
        background_inflate = kwargs.pop('background_inflate',
                                        self._theme.widget_background_inflate)
        if background_inflate == 0:
            background_inflate = (0, 0)
        assert_vector(background_inflate, 2, int)
        assert background_inflate[0] >= 0 and background_inflate[1] >= 0, \
            'both background inflate components must be equal or greater than zero'
        attributes['background_inflate'] = background_inflate

        # border_color
        border_color = kwargs.pop('border_color',
                                  self._theme.widget_border_color)
        if border_color is not None:
            border_color = assert_color(border_color)
        attributes['border_color'] = border_color

        # border_inflate
        border_inflate = kwargs.pop('border_inflate',
                                    self._theme.widget_border_inflate)
        if border_inflate == 0:
            border_inflate = (0, 0)
        assert_vector(border_inflate, 2, int)
        assert isinstance(border_inflate[0], int) and border_inflate[0] >= 0
        assert isinstance(border_inflate[1], int) and border_inflate[1] >= 0
        attributes['border_inflate'] = border_inflate

        # border_position
        border_position = kwargs.pop('border_position',
                                     self._theme.widget_border_position)
        assert_position_vector(border_position)
        attributes['border_position'] = border_position

        # border_width
        border_width = kwargs.pop('border_width', self._theme.widget_border_width)
        assert isinstance(border_width, int) and border_width >= 0
        attributes['border_width'] = border_width

        # cursor
        cursor = kwargs.pop('cursor', self._theme.widget_cursor)
        assert_cursor(cursor)
        attributes['cursor'] = cursor

        # floating status
        float_ = kwargs.pop('float', False)
        assert isinstance(float_, bool)
        attributes['float'] = float_
        float_origin_position = kwargs.pop('float_origin_position', False)
        assert isinstance(float_origin_position, bool)
        attributes['float_origin_position'] = float_origin_position

        # font_antialias (theme-only; not overridable via kwargs)
        attributes['font_antialias'] = self._theme.widget_font_antialias

        # font_background_color; may inherit the menu background color when the
        # widget itself has no solid background color
        font_background_color = kwargs.pop('font_background_color',
                                           self._theme.widget_font_background_color)
        if font_background_color is None and \
                self._theme.widget_font_background_color_from_menu and \
                not background_is_color:
            if not isinstance(self._theme.background_color, pygame_menu.BaseImage):
                font_background_color = assert_color(self._theme.background_color)
        attributes['font_background_color'] = font_background_color

        # font_color
        font_color = kwargs.pop('font_color', self._theme.widget_font_color)
        attributes['font_color'] = assert_color(font_color)

        # font_name
        font_name = kwargs.pop('font_name', self._theme.widget_font)
        assert_font(font_name)
        attributes['font_name'] = font_name

        # font_shadow
        font_shadow = kwargs.pop('font_shadow', self._theme.widget_font_shadow)
        assert isinstance(font_shadow, bool)
        attributes['font_shadow'] = font_shadow

        # font_shadow_color
        font_shadow_color = kwargs.pop('font_shadow_color',
                                       self._theme.widget_font_shadow_color)
        attributes['font_shadow_color'] = assert_color(font_shadow_color)

        # font_shadow_offset
        font_shadow_offset = kwargs.pop('font_shadow_offset',
                                        self._theme.widget_font_shadow_offset)
        assert isinstance(font_shadow_offset, int)
        attributes['font_shadow_offset'] = font_shadow_offset

        # font_shadow_position
        font_shadow_position = kwargs.pop('font_shadow_position',
                                          self._theme.widget_font_shadow_position)
        assert isinstance(font_shadow_position, str)
        attributes['font_shadow_position'] = font_shadow_position

        # font_size
        font_size = kwargs.pop('font_size', self._theme.widget_font_size)
        assert isinstance(font_size, int)
        assert font_size > 0, 'font size must be greater than zero'
        attributes['font_size'] = font_size

        # margin (scalar 0 normalized to (0, 0))
        margin = kwargs.pop('margin', self._theme.widget_margin)
        if margin == 0:
            margin = (0, 0)
        assert_vector(margin, 2)
        attributes['margin'] = margin

        # padding
        padding = kwargs.pop('padding', self._theme.widget_padding)
        assert isinstance(padding, PaddingInstance)
        attributes['padding'] = padding

        # readonly_color
        readonly_color = kwargs.pop('readonly_color', self._theme.readonly_color)
        attributes['readonly_color'] = assert_color(readonly_color)

        # readonly_selected_color
        readonly_selected_color = kwargs.pop('readonly_selected_color',
                                             self._theme.readonly_selected_color)
        attributes['readonly_selected_color'] = assert_color(readonly_selected_color)

        # selection_color
        selection_color = kwargs.pop('selection_color', self._theme.selection_color)
        attributes['selection_color'] = assert_color(selection_color)

        # selection_effect; copied so each widget owns its own effect instance
        selection_effect = kwargs.pop('selection_effect', self._theme.widget_selection_effect)
        if selection_effect is None:
            selection_effect = pygame_menu.widgets.NoneSelection()
        else:
            selection_effect = selection_effect.copy()
        assert isinstance(selection_effect, pygame_menu.widgets.core.Selection)
        selection_effect.set_color(attributes['selection_color'])
        attributes['selection_effect'] = selection_effect

        # shadow
        attributes['shadow_aa'] = kwargs.pop('shadow_aa', self._theme.widget_shadow_aa)
        attributes['shadow_color'] = kwargs.pop('shadow_color', self._theme.widget_shadow_color)
        attributes['shadow_radius'] = kwargs.pop('shadow_radius', self._theme.widget_shadow_radius)
        attributes['shadow_type'] = kwargs.pop('shadow_type', self._theme.widget_shadow_type)
        attributes['shadow_width'] = kwargs.pop('shadow_width', self._theme.widget_shadow_width)

        # tab_size
        attributes['tab_size'] = kwargs.pop('tab_size',
                                            self._theme.widget_tab_size)

        return attributes

    def _configure_widget(self, widget: 'Widget', **kwargs) -> None:
        """
        Apply the attributes produced by ``_filter_widget_attributes`` to the
        given widget.

        :param widget: Widget to configure
        :param kwargs: Validated attribute dict (all keys required)
        """
        assert isinstance(widget, Widget)
        widget.set_alignment(
            align=kwargs['align']
        )
        widget.set_background_color(
            color=kwargs['background_color'],
            inflate=kwargs['background_inflate']
        )
        widget.set_border(
            color=kwargs['border_color'],
            inflate=kwargs['border_inflate'],
            position=kwargs['border_position'],
            width=kwargs['border_width']
        )
        # Widget inherits the Menu's enabled input backends
        widget.set_controls(
            joystick=self._menu._joystick,
            keyboard=self._menu._keyboard,
            mouse=self._menu._mouse,
            touchscreen=self._menu._touchscreen
        )
        widget.set_cursor(
            cursor=kwargs['cursor']
        )
        widget.set_float(
            float_status=kwargs['float'],
            origin_position=kwargs['float_origin_position']
        )
        widget.set_font(
            antialias=kwargs['font_antialias'],
            background_color=kwargs['font_background_color'],
            color=kwargs['font_color'],
            font=kwargs['font_name'],
            font_size=kwargs['font_size'],
            readonly_color=kwargs['readonly_color'],
            readonly_selected_color=kwargs['readonly_selected_color'],
            selected_color=kwargs['selection_color']
        )
        widget.set_font_shadow(
            color=kwargs['font_shadow_color'],
            enabled=kwargs['font_shadow'],
            offset=kwargs['font_shadow_offset'],
            position=kwargs['font_shadow_position']
        )
        widget.set_margin(
            x=kwargs['margin'][0],
            y=kwargs['margin'][1]
        )
        widget.set_padding(
            padding=kwargs['padding']
        )
        widget.set_selection_effect(
            selection=kwargs['selection_effect']
        )
        widget.set_tab_size(
            tab_size=kwargs['tab_size']
        )
        widget.shadow(
            aa_amount=kwargs['shadow_aa'],
            color=kwargs['shadow_color'],
            corner_radius=kwargs['shadow_radius'],
            shadow_type=kwargs['shadow_type'],
            shadow_width=kwargs['shadow_width']
        )
        if self._theme.widget_background_inflate_to_selection:
            widget.background_inflate_to_selection_effect()
        widget._update__repr___(self)
        widget._keyboard_ignore_nonphysical = self._menu._keyboard_ignore_nonphysical
        # Mark configured and fire the widget's own post-configuration hook
        widget.configured = True
        widget._configure()

    @staticmethod
    def _check_kwargs(kwargs: Dict) -> None:
        """Raise ``ValueError`` on the first keyword argument left unconsumed."""
        for invalid_keyword in kwargs.keys():
            raise ValueError(f'widget addition optional parameter kwargs.{invalid_keyword} is not valid')

    def _append_widget(self, widget: 'Widget') -> None:
        """
        Append ``widget`` to the Menu widget list, select it if it is the first
        selectable widget, and force a render to validate the Menu sizing.

        :param widget: Widget to append (must belong to this Menu, or none yet)
        """
        assert isinstance(widget, Widget)
        if widget.get_menu() is None:
            widget.set_menu(self._menu)
        assert widget.get_menu() == self._menu, \
            'widget cannot have a different instance of menu'
        self._menu._check_id_duplicated(widget.get_id())
        if widget.get_scrollarea() is None:
            widget.set_scrollarea(self._menu.get_scrollarea())

        # Unselect
        widget.select(False)

        # Append to lists
        self._menu._widgets.append(widget)

        # Update selection index
        if self._menu._index < 0 and widget.is_selectable:
            widget.select()
            self._menu._index = len(self._menu._widgets) - 1

        # Force menu rendering, this checks if the menu overflows or has sizing
        # errors; if added on execution time forces the update of the surface
        self._menu._widgets_surface = None
        try:
            self._menu._render()
        except (pygame_menu.menu._MenuSizingException,
                pygame_menu.menu._MenuWidgetOverflow):
            # Roll back the append so the Menu stays consistent, then re-raise
            self._menu.remove_widget(widget)
            raise
        self._menu.render()

        # Sort frame widgets, as render position changes frame position/frame
        if len(self._menu._update_frames) > 0:
            self._menu._update_frames[0]._sort_menu_update_frames()

        # Update widgets
        check_widget_mouseleave()

        # Call event
        widget._append_to_menu()

    # noinspection PyMissingOrEmptyDocstring
    def configure_defaults_widget(self, widget: 'Widget') -> None:
        """Apply the default theme configuration to ``widget``."""
        self._configure_widget(widget, **self._filter_widget_attributes({}))

    def generic_widget(
            self,
            widget: 'Widget',
            configure_defaults: bool = False
    ) -> 'Widget':
        """
        Add generic widget to the Menu.

        .. note::

            The widget should be fully configured by the user: font, padding, etc.

        .. note::

            This is applied only to the base Menu (not the currently displayed,
            stored in ``_current`` pointer); for such behaviour apply to
            :py:meth:`pygame_menu.menu.Menu.get_current` object.

        .. warning::

            Unintended behaviours may happen while using this method, use only with
            caution; specially while creating nested submenus with buttons.

        :param widget: Widget to be added
        :param configure_defaults: Apply defaults widget configuration (for example, theme)
        :return: The added widget object
        :rtype: :py:class:`pygame_menu.widgets.Widget`
        """
        assert isinstance(widget, Widget)
        if widget.get_menu() is not None:
            raise ValueError('widget to be added is already appended to another Menu')

        # Raise warning if adding button with Menu
        if isinstance(widget, pygame_menu.widgets.Button) and widget.to_menu:
            warn(
                'prefer adding submenus using add_button method instead, '
                'unintended behaviours may occur'
            )

        # Configure widget
        if configure_defaults:
            self.configure_defaults_widget(widget)

        self._append_widget(widget)
        return widget
|
dcartman/pygame-menu | pygame_menu/examples/other/scrollbar_area.py | """
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - SCROLL AREA
Shows ScrollArea widget usage.
"""
__all__ = ['main']
import pygame
from pygame_menu import locals
from pygame_menu.examples import create_example_window
from pygame_menu._scrollarea import ScrollArea
from pygame_menu.utils import make_surface
import itertools
from typing import Generator
FPS = 30
W_SIZE = 800  # Width of window size
H_SIZE = 600  # Height of window size
COLOR_BACKGROUND = (128, 230, 198)

# Legend drawn on each world surface; filled with the visible area size and
# the world (content) size
LEGEND = 'Area {}x{}\nWorld {}x{}\nPress [ESC] to change'

# Demo configurations cycled by iter_world: 'pos' is the area position on
# screen, 'win' the visible area size, 'size' the world (content) size
WORLDS = {
    '1': {'pos': (0, 0),
          'win': (W_SIZE, H_SIZE),
          'size': (W_SIZE * 2, H_SIZE * 3)},
    '2': {'pos': (200, 100),
          'win': (W_SIZE // 2, H_SIZE // 2),
          'size': (W_SIZE * 2, H_SIZE * 3)},
    '3': {'pos': (50, 250),
          'win': (W_SIZE // 2, H_SIZE // 2),
          'size': (200, 200)},
    '4': {'pos': (350, 250),
          'win': (W_SIZE // 2, H_SIZE // 2),
          'size': (W_SIZE // 2, H_SIZE // 2)},
    '5': {'pos': (200, 200),
          'win': (W_SIZE // 2, H_SIZE // 2),
          'size': (W_SIZE // 2, H_SIZE // 2 + 10)},
    '6': {'pos': (10, 10),
          'win': (W_SIZE - 300, H_SIZE // 2),
          'size': (W_SIZE - 200, H_SIZE // 2 - 10)}
}
def make_world(width: int, height: int, text: str = '') -> 'pygame.Surface':
    """
    Create a test surface with a coordinate grid and optional text.

    :param width: Width in pixels
    :param height: Height in pixels
    :param text: Text to write
    :return: World surface
    """
    world = make_surface(width, height)
    world.fill((210, 210, 210))
    font = pygame.font.SysFont('arial', 20)

    # Render the text line by line. Use a dedicated name for the rendered
    # surface; the original code rebound ``text`` itself, shadowing the
    # string parameter with a Surface
    posy = 60
    for line in text.splitlines():
        line_surface = font.render(str(line), True, (0, 0, 0))
        world.blit(line_surface, (60, posy))
        posy += line_surface.get_height() + 10

    # Horizontal ruler: every 100px a long red tick, a grey grid line and a
    # numeric label; every 10px a short red tick
    for x in range(0, width, 10):
        if x % 100 == 0 and x != 0:
            pygame.draw.line(world, (255, 0, 0), (x, 0), (x, 20))
            pygame.draw.line(world, (180, 180, 180), (x, 80), (x, height))
            tick = font.render(str(x), True, (255, 0, 0))
            world.blit(tick, (int(x - tick.get_width() / 2), 25))
        else:
            pygame.draw.line(world, (255, 0, 0), (x, 0), (x, 10))

    # Vertical ruler (same scheme as above)
    for y in range(0, height, 10):
        if y % 100 == 0 and y != 0:
            pygame.draw.line(world, (255, 0, 0), (0, y), (20, y))
            pygame.draw.line(world, (180, 180, 180), (80, y), (width, y))
            tick = font.render(str(y), True, (255, 0, 0))
            world.blit(tick, (25, int(y - tick.get_height() / 2)))
        else:
            pygame.draw.line(world, (255, 0, 0), (0, y), (10, y))

    return world
# noinspection PyProtectedMember
def iter_world(area: 'ScrollArea') -> Generator:
    """
    Endlessly cycle through the ``WORLDS`` configurations, applying each one
    to the given scroll area before yielding it.

    :param area: Scroll area
    """
    for world_name in itertools.cycle(WORLDS):
        cfg = WORLDS[world_name]
        win_w, win_h = cfg['win']
        world_w, world_h = cfg['size']

        # Resize the visible window of the scroll area
        area._rect.width = win_w
        area._rect.height = win_h

        # Build and attach a fresh world surface carrying the legend text
        legend = LEGEND.format(win_w, win_h, world_w, world_h)
        area.set_world(make_world(world_w, world_h, legend))
        area.set_position(*cfg['pos'])
        yield cfg
def main(test: bool = False) -> None:
    """
    Main function.

    :param test: Indicate function is being tested
    """
    screen = create_example_window('Example - Scrolling Area', (W_SIZE, H_SIZE))
    clock = pygame.time.Clock()
    scroll = ScrollArea(
        W_SIZE, H_SIZE,
        scrollbars=(
            locals.POSITION_SOUTH,
            locals.POSITION_EAST,
            locals.POSITION_WEST,
            locals.POSITION_NORTH
        )
    )
    worlds = iter_world(scroll)
    next(worlds)

    # -------------------------------------------------------------------------
    # Main loop
    # -------------------------------------------------------------------------
    while True:
        clock.tick(FPS)

        # Paint background plus a frame around the area; the frame is inflated
        # so any area overflow would be visible in case of bug
        screen.fill(COLOR_BACKGROUND)
        pygame.draw.rect(
            screen,
            (20, 89, 20),
            scroll.get_rect().inflate(20, 20)
        )

        # Application events: quit closes the demo, [ESC] cycles to the next world
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                exit(0)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                next(worlds)

        scroll.update(events)
        scroll.draw(screen)

        # Update surface
        pygame.display.flip()

        # At first loop returns
        if test:
            break
# Run the example when executed as a script
if __name__ == '__main__':
    main()
|
dcartman/pygame-menu | pygame_menu/widgets/widget/menulink.py | """
pygame-menu
https://github.com/ppizarror/pygame-menu
MENULINK
Similar to a Button that opens a Menu, MenuLink is a widget that contains a Menu
reference. This Menu can be opened with .open() method.
"""
__all__ = [
'MenuLink',
'MenuLinkManager'
]
import pygame_menu
from abc import ABC
from pygame_menu.widgets.core.widget import AbstractWidgetManager
from pygame_menu.widgets.widget.none import NoneWidget
from pygame_menu.utils import is_callable
from pygame_menu._types import Callable
# noinspection PyMissingOrEmptyDocstring
class MenuLink(NoneWidget):
    """
    Menu link widget; adds a link to another Menu. The behaviour is similar to a
    button, but this widget is invisible, and cannot be selectable.

    .. note::

        MenuLink does not accept any transformation.

    :param link_id: Link ID
    :param menu_opener_handler: Callback for opening the menu object
    :param menu: Menu object
    """
    menu: 'pygame_menu.Menu'

    def __init__(
            self,
            menu: 'pygame_menu.Menu',
            menu_opener_handler: Callable,
            link_id: str = ''
    ) -> None:
        assert isinstance(menu, pygame_menu.Menu)
        assert is_callable(menu_opener_handler), \
            'menu opener handler must be callable (a function)'
        super(MenuLink, self).__init__(
            widget_id=link_id
        )
        self.menu = menu
        self._onreturn = menu_opener_handler
        self._visible = False
        self.is_selectable = False

    def hide(self) -> 'MenuLink':
        # The link is always hidden: ignore hide requests, but return self to
        # honour the annotated return type (the original returned None)
        return self

    def show(self) -> 'MenuLink':
        # The link cannot be made visible: no-op; return self for chaining
        return self

    def open(self) -> None:
        """Open the linked Menu through the stored opener handler."""
        return self._onreturn(self.menu)
class MenuLinkManager(AbstractWidgetManager, ABC):
    """
    MenuLink manager.
    """

    def menu_link(
            self,
            menu: 'pygame_menu.Menu',
            link_id: str = ''
    ) -> 'pygame_menu.widgets.MenuLink':
        """
        Adds a link to another Menu. The behaviour is similar to a button, but
        this widget is invisible, and cannot be selectable.

        Added menus can be opened using the ``.open()`` method. Opened menus change
        the state of the parent Menu (the current pointer).

        .. note::

            This is applied only to the base Menu (not the currently displayed,
            stored in ``_current`` pointer); for such behaviour apply to
            :py:meth:`pygame_menu.menu.Menu.get_current` object.

        :param menu: Menu to be added as a link (the new submenu)
        :param link_id: ID of the menu link
        :return: Menu link widget
        :rtype: :py:class:`pygame_menu.widgets.MenuLink`
        """
        # Guard: reject anything that is not a Menu instance
        if not isinstance(menu, type(self._menu)):
            raise ValueError('menu object is not a pygame_menu.Menu class')

        # Guard: reject links that would create a recursive menu structure
        if menu == self._menu or menu.in_submenu(self._menu, recursive=True):
            raise ValueError(
                f'Menu "{menu.get_title()}" is already on submenu structure,'
                f' recursive menus lead to unexpected behaviours. For '
                f'returning to previous menu use pygame_menu.events.BACK '
                f'event defining an optional back_count number of menus to '
                f'return from, default is 1'
            )

        # noinspection PyProtectedMember
        link = MenuLink(
            menu=menu,
            menu_opener_handler=self._menu._open,
            link_id=link_id
        )
        self.configure_defaults_widget(link)
        self._append_widget(link)
        self._add_submenu(menu, link)
        return link
|
zzpmiracle/tensorflow-onnx | tf2onnx/onnx_opset/rnn.py | <gh_stars>0
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
rnn
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from tf2onnx import utils
from tf2onnx.handler import tf_op
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring
@tf_op("LSTMBlockCell")
class LSTMBlockCell:
    """Lower TensorFlow ``LSTMBlockCell`` into primitive ONNX ops."""

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """
        Args:
          x: A `Tensor`. Must be one of the following types: `float32`.
            The input to the LSTM cell, shape (batch_size, num_inputs).
          cs_prev: A `Tensor`. Must have the same type as `x`.
            Value of the cell state at previous time step.
          h_prev: A `Tensor`. Must have the same type as `x`.
            Output of the previous cell at previous time step.
          w: A `Tensor`. Must have the same type as `x`. The weight matrix.
          wci: A `Tensor`. Must have the same type as `x`.
            The weight matrix for input gate peephole connection.
          wcf: A `Tensor`. Must have the same type as `x`.
            The weight matrix for forget gate peephole connection.
          wco: A `Tensor`. Must have the same type as `x`.
            The weight matrix for output gate peephole connection.
          b: A `Tensor`. Must have the same type as `x`. The bias vector.
          forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
          cell_clip: An optional `float`. Defaults to `-1` (no clipping).
            Value to clip the 'cs' value to. Disable by setting to negative value.
          use_peephole: An optional `bool`. Defaults to `False`.
            Whether to use peephole weights.
          name: A name for the operation (optional).

        Returns:
          A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
          i: A `Tensor`. Has the same type as `x`. The input gate.
          cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
          f: A `Tensor`. Has the same type as `x`. The forget gate.
          o: A `Tensor`. Has the same type as `x`. The output gate.
          ci: A `Tensor`. Has the same type as `x`. The cell input.
          co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
          h: A `Tensor`. Has the same type as `x`. The output h vector.

        ```python
        xh = [x, h_prev]
        [i, ci, f, o] = xh * w + b
        f = f + forget_bias
        if not use_peephole:
          wci = wcf = wco = 0
        i = sigmoid(cs_prev .* wci + i)
        f = sigmoid(cs_prev .* wcf + f)
        ci = tanh(ci)
        cs = ci .* i + cs_prev .* f
        cs = clip(cs, cell_clip)
        o = sigmoid(cs * wco + o)
        co = tanh(cs)
        h = co .* o
        ```
        """
        # NOTE(review): ``nodes`` collects only some of the created nodes and is
        # never read afterwards — appears vestigial; confirm before removing.
        nodes = []
        x, cs_prev, h_prev, w, wci, wcf, wco, b = node.input
        forget_bias = float(node.get_attr("forget_bias").f)
        cell_clip = float(node.get_attr("cell_clip").f)
        use_peephole = bool(node.get_attr("use_peephole").i)

        def make_sigmoid(i, w, b):
            # sigmoid(i .* w + b), built as Mul -> Add -> Sigmoid
            i_w_node = ctx.make_node("Mul", [i, w])
            i_w_b_node = ctx.make_node("Add", [i_w_node.output[0], b])
            output_node = ctx.make_node("Sigmoid", [i_w_b_node.output[0]])
            nodes.extend([i_w_node, i_w_b_node, output_node])
            return output_node.output[0]

        # xh = [x, h]
        xh_node = ctx.make_node("Concat", [x, h_prev], attr={"axis": 1})

        # i, ci, f, o = xh * w + b
        xh_w_node = ctx.make_node("MatMul", [xh_node.output[0], w])
        w_shape = ctx.get_shape(w)
        if len(w_shape) != 2 or w_shape[1] % 4 != 0:
            raise RuntimeError("shape of W of LSTMBlockCell {} should be times of 4".format(node.name))
        merged_output_node = ctx.make_node("Add", [xh_w_node.output[0], b])
        # Split the fused projection into the four equally-sized gate slices
        w_last_dim = int(w_shape[1] / 4)
        split = [w_last_dim] * 4
        split_output_node = ctx.make_node(
            "Split", [merged_output_node.output[0]],
            attr={"axis": 1, "split": split},
            output_count=4
        )
        i, ci, f, o = split_output_node.output

        # f = f + forget_bias
        forget_bias_const = ctx.make_const(
            utils.make_name("{}__forget_bias".format(node.name)),
            np.array(forget_bias, dtype=np.float32)
        )
        f_node = ctx.make_node("Add", [f, forget_bias_const.output[0]])

        if not use_peephole:
            # Without peepholes the cell-state gate weights become zeros, so
            # the peephole terms below vanish
            zeros_const = ctx.make_const(
                utils.make_name("{}__zeros_const".format(node.name)),
                np.zeros([w_last_dim], dtype=np.float32)
            )
            nodes.append(zeros_const)
            wci = zeros_const.output[0]
            wcf = zeros_const.output[0]
            wco = zeros_const.output[0]

        # i = sigmoid(cs_prev .* wci + i)
        i = make_sigmoid(cs_prev, wci, i)
        # f = sigmoid(cs_prev .* wcf + f)
        f = make_sigmoid(cs_prev, wcf, f_node.output[0])

        # ci = Tanh(ci)
        ci_node = ctx.make_node("Tanh", [ci])

        # cs = ci .* i + f .* cs_prev
        ci_i_node = ctx.make_node("Mul", [ci_node.output[0], i])
        cs_prev_f_node = ctx.make_node("Mul", [cs_prev, f])
        cs_node = ctx.make_node("Add", [ci_i_node.output[0], cs_prev_f_node.output[0]])
        cs = cs_node.output[0]

        # cs = clip(cs); only when a positive clip value was requested
        if cell_clip > 0:
            if ctx.opset < 11:
                # opset < 11: Clip takes min/max as attributes
                cs_clip_node = ctx.make_node("Clip", [cs], attr={"max": cell_clip, "min": -cell_clip})
                nodes.append(cs_clip_node)
                cs = cs_clip_node.output[0]
            else:
                # opset >= 11: Clip takes min/max as inputs typed like cs
                dtype = utils.map_onnx_to_numpy_type(ctx.get_dtype(cs))
                name_min = utils.make_name("{}_min".format(node.name))
                name_max = utils.make_name("{}_max".format(node.name))
                min_const = ctx.make_const(name_min, np.array(-cell_clip, dtype=dtype))
                max_const = ctx.make_const(name_max, np.array(cell_clip, dtype=dtype))
                cs_clip_node = ctx.make_node('Clip', [cs, min_const.output[0], max_const.output[0]])
                nodes.append(cs_clip_node)
                cs = cs_clip_node.output[0]

        # o = cs * wco + o
        o = make_sigmoid(cs, wco, o)

        # co = Tanh(cs)
        co_node = ctx.make_node("Tanh", [cs])

        # h = co .* o
        h_node = ctx.make_node("Mul", [co_node.output[0], o])

        def replace_output(old_output, new_output):
            # Rewire all consumers of the original TF output to the new tensor,
            # carrying over dtype and shape metadata
            ctx.replace_all_inputs(old_output, new_output)  # ops=ctx.get_nodes()
            ctx.copy_dtype(old_output, new_output)
            ctx.copy_shape(old_output, new_output)

        replace_output(node.output[0], i)
        replace_output(node.output[1], cs)
        replace_output(node.output[2], f)
        replace_output(node.output[3], o)
        replace_output(node.output[4], ci_node.output[0])
        replace_output(node.output[5], co_node.output[0])
        replace_output(node.output[6], h_node.output[0])

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # Same lowering as opset 1
        cls.version_1(ctx, node, **kwargs)
@tf_op("CudnnRNN")
class CudnnRNN:
    """Lower TensorFlow ``CudnnRNN`` (GRU mode only) onto stacked ONNX GRU nodes."""

    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        x = node.input[0]
        x_shape = ctx.get_shape(x)
        h = node.input[1]
        h_shape = ctx.get_shape(h)
        # p is the packed parameter blob holding W, R and B sections back-to-back
        p = node.input[3]
        # Only a subset of CudnnRNN configurations is supported
        utils.make_sure(
            node.attr["rnn_mode"].s == b"gru",
            "rnn mode other than gru are not supported yet"
        )
        utils.make_sure(
            node.attr["dropout"].f == 0,
            "dropout not supported yet"
        )
        utils.make_sure(
            node.attr["input_mode"].s == b"linear_input",
            "input mode must be linear input"
        )
        num_dirs = 1 if node.attr["direction"].s == b"unidirectional" else 2
        num_layers = int(h_shape[0] / num_dirs)
        num_units = hidden_size = h_shape[2]
        input_size = x_shape[2]
        # Per-section shapes inside the packed blob (3/6 gate blocks per GRU)
        w_shape = [num_layers * num_dirs, 3 * hidden_size, input_size]
        w_shape_const = ctx.make_const(utils.make_name("w_shape"), np.array(w_shape, dtype=np.int64))
        r_shape = [num_layers * num_dirs, 3 * hidden_size, hidden_size]
        r_shape_const = ctx.make_const(utils.make_name("r_shape"), np.array(r_shape, dtype=np.int64))
        b_shape = [num_layers * num_dirs, 6 * hidden_size]
        b_shape_const = ctx.make_const(utils.make_name("b_shape"), np.array(b_shape, dtype=np.int64))
        # Flat slice boundaries of each section within p
        zero_const = ctx.make_const(utils.make_name("zero"), np.array([0], dtype=np.int64))
        w_end = np.prod(w_shape)
        w_end_const = ctx.make_const(utils.make_name("w_end"), np.array([w_end], dtype=np.int64))
        r_end = w_end + np.prod(r_shape)
        r_end_const = ctx.make_const(utils.make_name("r_end"), np.array([r_end], dtype=np.int64))
        b_end = r_end + np.prod(b_shape)
        b_end_const = ctx.make_const(utils.make_name("b_end"), np.array([b_end], dtype=np.int64))

        def name(nm):
            # Prefix helper so generated tensor names stay unique per op
            return node.name + "_" + nm

        # Per-layer/direction tensor names for weights, recurrence, biases,
        # initial states and final hidden states
        ws = [name('W_' + str(i)) for i in range(num_layers * num_dirs)]
        rs = [name('R_' + str(i)) for i in range(num_layers * num_dirs)]
        bs = [name('B_' + str(i)) for i in range(num_layers * num_dirs)]
        hs = [name('H_' + str(i)) for i in range(num_layers * num_dirs)]
        yhs = [name('YH_' + str(i)) for i in range(num_layers * num_dirs)]
        # Slice the packed blob into its W, R and B sections, then reshape and
        # split them per layer/direction
        w_flattened = ctx.make_node('Slice', [p, zero_const.output[0], w_end_const.output[0]])
        r_flattened = ctx.make_node('Slice', [p, w_end_const.output[0], r_end_const.output[0]])
        b_flattened = ctx.make_node('Slice', [p, r_end_const.output[0], b_end_const.output[0]])
        w = utils.make_name('W')
        r = utils.make_name('R')
        b = utils.make_name('B')
        ctx.make_node('Reshape', [w_flattened.output[0], w_shape_const.output[0]], outputs=[w])
        ctx.make_node('Reshape', [r_flattened.output[0], r_shape_const.output[0]], outputs=[r])
        ctx.make_node('Reshape', [b_flattened.output[0], b_shape_const.output[0]], outputs=[b])
        ctx.make_node('Split', [w], outputs=ws)
        ctx.make_node('Split', [r], outputs=rs)
        ctx.make_node('Split', [b], outputs=bs)
        ctx.make_node('Split', [h], outputs=hs)
        # Chain one GRU per layer; xnf/xnb track the forward/backward inputs
        xnf = xnb = x
        for i in range(num_layers):
            suffix = '_' + str(i * num_dirs)
            ctx.make_node('GRU',
                          [xnf, name('W' + suffix), name('R' + suffix), name('B' + suffix), '', name('H' + suffix)],
                          outputs=[name('Y' + suffix), name('YH' + suffix)],
                          attr={'direction': 'forward', 'hidden_size': num_units})
            xnf = name(x + suffix)
            # Drop the num_directions axis so Y can feed the next layer
            ctx.make_node('Squeeze', [name('Y' + suffix)], outputs=[xnf], attr={'axes': [1]})
            if num_dirs == 2:
                suffix = '_' + str(i * 2 + 1)
                ctx.make_node('GRU',
                              [xnb, name('W' + suffix), name('R' + suffix), name('B' + suffix), '', name('H' + suffix)],
                              outputs=[name('Y' + suffix), name('YH' + suffix)],
                              attr={'direction': 'reverse', 'hidden_size': num_units})
                xnb = name(x + suffix)
                ctx.make_node('Squeeze', [name('Y' + suffix)], outputs=[xnb], attr={'axes': [1]})
        # Replace the original node: output 0 is the (possibly concatenated)
        # sequence output, output 1 the stacked final hidden states
        ctx.remove_node(node.name)
        if num_dirs == 2:
            ctx.make_node('Concat', [xnf, xnb], outputs=[node.output[0]], attr={'axis': -1})
        else:
            ctx.make_node('Identity', [xnf], outputs=[node.output[0]])
        ctx.make_node('Concat', yhs, outputs=[node.output[1]], attr={'axis': 0})
|
zzpmiracle/tensorflow-onnx | tf2onnx/onnx_opset/signal.py | <reponame>zzpmiracle/tensorflow-onnx
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
signal
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from onnx import onnx_pb
from onnx.numpy_helper import to_array
from tf2onnx import utils
from tf2onnx.handler import tf_op
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring
def make_dft_constant(length, dtype, fft_length):
    """Build the truncated DFT coefficient matrix used to emulate tf RFFT.

    Returns an array of shape (2, fft_length // 2 + 1, length) and dtype
    *dtype*, where slice 0 is the real part and slice 1 the imaginary part
    of exp(-2j*pi*k*n/length) restricted to the first fft_length//2+1 rows.
    """
    indices = np.arange(length)
    k_col = indices.reshape((length, 1)).astype(np.float64)
    # Full complex DFT matrix, then keep only the non-redundant rows
    # (the real FFT output has fft_length // 2 + 1 frequency bins).
    dft_full = np.exp(-2j * np.pi * k_col * indices / length)
    dft_trunc = dft_full[:fft_length // 2 + 1]
    stacked = np.empty((2,) + dft_trunc.shape, dtype=dtype)
    stacked[0, :, :] = np.real(dft_trunc)
    stacked[1, :, :] = np.imag(dft_trunc)
    return stacked
@tf_op("RFFT")
class RFFTOp:
    # support more dtype
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Rewrite tf RFFT as a constant DFT-matrix MatMul.

        Inspired from `Python implementation of RFFT
        <https://jakevdp.github.io/blog/2013/08/28/understanding-the-fft/>`_.
        Complex version:
        ::
            import numpy as np
            def _DFT_cst(N, fft_length):
                n = np.arange(N)
                k = n.reshape((N, 1)).astype(np.float64)
                M = np.exp(-2j * np.pi * k * n / N)
                return M[:fft_length // 2 + 1]
            def DFT(x, fft_length=None):
                if len(x.shape) == 1:
                    x = x.reshape((-1, 1))
                else:
                    x = x.T
                if fft_length is None:
                    fft_length = x.shape[0]
                cst = _DFT_cst(x.shape[0], fft_length)
                return np.dot(cst, x).T
        Real version, first axis is (real, imag) part:
        ::
            import numpy as np
            def _DFT_real_cst(N, fft_length):
                n = np.arange(N)
                k = n.reshape((N, 1)).astype(np.float64)
                M = np.exp(-2j * np.pi * k * n / N)
                M = M[:fft_length // 2 + 1]
                both = np.empty((2,) + M.shape)
                both[0, :, :] = np.real(M)
                both[1, :, :] = np.imag(M)
                return both
            def DFT_real(x, fft_length=None):
                if len(x.shape) == 1:
                    x = x.reshape((-1, 1))
                else:
                    x = x.T
                if fft_length is None:
                    fft_length = x.shape[0]
                cst = _DFT_real_cst(x.shape[0], fft_length)
                res = np.dot(cst, x)
                return np.transpose(res, (0, 2, 1))
        """
        supported_dtypes = [
            onnx_pb.TensorProto.FLOAT,
            onnx_pb.TensorProto.FLOAT16,
            onnx_pb.TensorProto.DOUBLE,
            onnx_pb.TensorProto.COMPLEX64,
            onnx_pb.TensorProto.COMPLEX128,
        ]
        # ONNX has no complex tensors, so the rewrite only works when the sole
        # consumer is ComplexAbs (handled by ComplexAbsOp below).
        consumers = ctx.find_output_consumers(node.output[0])
        consumer_types = set(op.type for op in consumers)
        utils.make_sure(
            consumer_types == {'ComplexAbs'},
            "Current implementation of RFFT only allows ComplexAbs as consumer not %r",
            consumer_types)
        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.")
        shape = ctx.get_shape(node.input[0])
        np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
        # NOTE(review): assumes the last dimension is static (shape[-1] not
        # None); a dynamic last dim would break make_dft_constant — confirm.
        shape_n = shape[-1]
        utils.make_sure(len(node.input) == 2, "Two inputs expected not %r", len(node.input))

        # This input should be a constant.
        fft_length_name = node.input[1]
        node_fft_length = ctx.get_node_by_output(fft_length_name, search_in_parent_graphs=True)
        utils.make_sure(node_fft_length.type == 'Const',
                        "fft_length should be a constant, the other case is not implemented yet.")
        value = node_fft_length.get_attr("value")
        value_array = to_array(value.t)
        utils.make_sure(value_array.shape == (1,), "Unexpected shape for fft_length (%r)", value_array.shape)
        fft_length = value_array[0]

        # TODO: handle this parameter when onnx.helper.make_node is fixed.
        # Tcomplex = node.get_attr("Tcomplex")

        # Map complex input dtypes onto the real dtype used for the matmul.
        if np_dtype == np.float16:
            res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float16)
            np_dtype = np.float16
        elif np_dtype in (np.float32, np.complex64):
            res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float32)
            np_dtype = np.float32
        else:
            res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float64)
            np_dtype = np.float64

        # Bake the DFT matrix into the graph as a constant initializer.
        real_imag_part = make_dft_constant(shape_n, np_dtype, fft_length)
        onx_real_imag_part = ctx.make_const(
            name=utils.make_name('cst_rfft_%d' % shape_n), np_val=real_imag_part)

        # Transpose so the FFT axis is second-to-last for the MatMul,
        # multiply by the DFT matrix, then transpose back.
        shapei = list(np.arange(len(shape)))
        perm = shapei[:-2] + [shapei[-1], shapei[-2]]
        trx = ctx.make_node(
            "Transpose", inputs=[node.input[0]], attr=dict(perm=perm),
            name=utils.make_name(node.name + 'tr'))
        ctx.remove_node(node.name)
        mult = ctx.make_node(
            "MatMul", inputs=[onx_real_imag_part.name, trx.output[0]],
            name=utils.make_name('CPLX_' + node.name + 'rfft'))
        # Leading dim 2 = (real, imag); see make_dft_constant.
        new_shape = [2] + list(shape)
        shapei = list(np.arange(len(new_shape)))
        perm = shapei[:-2] + [shapei[-1], shapei[-2]]
        last_node = ctx.make_node(
            "Transpose", inputs=[mult.output[0]], attr=dict(perm=perm),
            name=utils.make_name('CPLX_' + node.name + 'rfft'),
            shapes=[new_shape], dtypes=[res_onnx_dtype])
        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()
@tf_op("ComplexAbs")
class ComplexAbsOp:
    # support more dtype
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """
        Computes the modulus of a complex.
        If the matrix dtype is not complex64 or complex128,
        it assumes the first dimension means real part (0)
        and imaginary part (1, :, :...), as produced by RFFTOp above.
        Emits sqrt(real**2 + imag**2) and squeezes away the leading axis.
        """
        supported_dtypes = [
            onnx_pb.TensorProto.FLOAT,
            onnx_pb.TensorProto.FLOAT16,
            onnx_pb.TensorProto.DOUBLE,
            onnx_pb.TensorProto.COMPLEX64,
            onnx_pb.TensorProto.COMPLEX128,
        ]
        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.")
        shape = ctx.get_shape(node.input[0])
        np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
        utils.make_sure(shape[0] == 2, "ComplexAbs expected the first dimension to be 2 but shape is %r", shape)
        # Index constants for the real (0) and imaginary (1) slices, and the
        # exponent 2 for Pow.
        ind0 = ctx.make_const(name=utils.make_name('cst0'), np_val=np.array([0], dtype=np.int64))
        ind1 = ctx.make_const(name=utils.make_name('cst1'), np_val=np.array([1], dtype=np.int64))
        p2 = ctx.make_const(name=utils.make_name('p2'), np_val=np.array([2], dtype=np_dtype))
        real_part = ctx.make_node(
            'Gather', inputs=[node.input[0], ind0.name], attr=dict(axis=0),
            name=utils.make_name('Real_' + node.name))
        imag_part = ctx.make_node(
            'Gather', inputs=[node.input[0], ind1.name], attr=dict(axis=0),
            name=utils.make_name('Imag_' + node.name))
        real_part2 = ctx.make_node(
            'Pow', inputs=[real_part.output[0], p2.name],
            name=utils.make_name(real_part.name + 'p2p'))
        imag_part2 = ctx.make_node(
            'Pow', inputs=[imag_part.output[0], p2.name],
            name=utils.make_name(imag_part.name + 'p2p'))
        ctx.remove_node(node.name)
        add = ctx.make_node(
            "Add", inputs=[real_part2.output[0], imag_part2.output[0]],
            name=utils.make_name('ComplexAbs_' + node.name))
        if opset == 1:
            # Pre-opset-13 Squeeze takes the axes as an attribute.
            squeezed = ctx.make_node(
                "Squeeze", inputs=add.output[:1], attr=dict(axes=[0]),
                name=utils.make_name('ComplexAbs' + node.name))
        else:
            # Opset 13 moved Squeeze's axes from an attribute to a second
            # input. Fix: the original passed the const *node* object `ind0`
            # here instead of its output name, which is not a valid input.
            squeezed = ctx.make_node(
                "Squeeze", inputs=[add.output[0], ind0.name],
                name=utils.make_name('ComplexAbsSqr' + node.name))
        last_node = ctx.make_node(
            "Sqrt", inputs=squeezed.output[:1],
            name=utils.make_name('ComplexAbs' + node.name),
            shapes=[shape[1:]], dtypes=[onnx_dtype])
        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        cls.any_version(1, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # NOTE(review): passes 11, but any value != 1 selects the
        # axes-as-input Squeeze form above; kept to preserve behaviour.
        cls.any_version(11, ctx, node, **kwargs)
|
zzpmiracle/tensorflow-onnx | tf2onnx/rewriter/rnn_utils.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewriter.rnn_utils - rnn support
"""
from __future__ import unicode_literals
from collections import defaultdict
from enum import Enum
import logging
import numpy as np
from tf2onnx import utils
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.graph_matcher import OpTypePattern # pylint: disable=unused-import
# pylint: disable=invalid-name,unused-argument,missing-docstring
logger = logging.getLogger(__name__)
class REWRITER_RESULT(Enum):
    """Outcome of a single rewriter pass."""
    SKIP = 1  # pattern did not apply to this graph
    OK = 2  # rewrite applied successfully
    FAIL = 3  # rewrite attempted but failed
# TensorFlow LSTMCell/BasicLSTMCell computation graph matching
# Shared sub-pattern: the fused gate pre-activation of LSTMCell, i.e.
# Split(BiasAdd(MatMul(concat(x, h), kernel), bias)) into the four gates.
xc_pattern = \
    OpTypePattern('Split', inputs=[
        OpTypePattern("Const"),  # axis for split
        OpTypePattern("BiasAdd", name="bias_add", inputs=[
            OpTypePattern("MatMul", inputs=[
                OpTypePattern("ConcatV2|Concat", name="xh"),
                OpTypePattern("Enter", inputs=[
                    OpTypePattern("*", name="cell_kernel"),
                ]),
            ]),
            OpTypePattern("Enter", inputs=[
                OpTypePattern("*", name="cell_bias"),
            ]),
        ]),
    ])

# Full LSTMCell body: h_t = o_t * tanh(c_t) with
# c_t = f_t * c_{t-1} + i_t * g_t (standard LSTM cell equations).
lstmcell_pattern = \
    OpTypePattern('Mul', name='ht', inputs=[
        OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern]),
        OpTypePattern('Tanh', inputs=[
            OpTypePattern("Add|AddV2", name="ct", inputs=[
                OpTypePattern("Mul", name="ct_identity_consumer", inputs=[
                    OpTypePattern("Sigmoid", name="ft", inputs=[
                        OpTypePattern("Add|AddV2", inputs=[
                            xc_pattern,
                            OpTypePattern("*", name="ft_bias"),
                        ]),
                    ]),
                    OpTypePattern("*"),
                ]),
                OpTypePattern("Mul", inputs=[
                    OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern]),
                    OpTypePattern("Tanh", name="gt", inputs=[xc_pattern]),
                ]),
            ]),
        ]),
    ])

# Variant of xc_pattern after constant folding: the kernel is a plain Const
# (no Enter) and BiasAdd has been folded into an Identity.
xc_pattern_optimized = \
    OpTypePattern('Split', inputs=[
        OpTypePattern("Const"),
        OpTypePattern("Identity", inputs=[
            OpTypePattern("MatMul", inputs=[
                OpTypePattern("ConcatV2|Concat", name="xh"),
                OpTypePattern("Const", name="cell_kernel"),
            ]),
        ]),
    ])

# Same LSTM cell equations as lstmcell_pattern, built on the folded gates.
lstmcell_pattern_optimized = \
    OpTypePattern('Mul', name='ht', inputs=[
        OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern_optimized]),
        OpTypePattern('Tanh', inputs=[
            OpTypePattern("Add|AddV2", name="ct", inputs=[
                OpTypePattern("Mul", name="ct_identity_consumer", inputs=[
                    OpTypePattern("Sigmoid", name="ft", inputs=[
                        OpTypePattern("Add|AddV2", inputs=[
                            xc_pattern_optimized,
                            OpTypePattern("*", name="ft_bias"),
                        ]),
                    ]),
                    OpTypePattern("*"),
                ]),
                OpTypePattern("Mul", inputs=[
                    OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern_optimized]),
                    OpTypePattern("Tanh", name="gt", inputs=[xc_pattern_optimized]),
                ]),
            ]),
        ]),
    ])
# input sequence: top to down, left to right
# split into update gate and reset gate
# Shared sub-pattern: sigmoid of the fused gate matmul, split into the
# update gate and the reset gate.
gru_split_pattern = \
    OpTypePattern("Split", inputs=[
        OpTypePattern("Const"),  # split dim, a constant
        OpTypePattern("Sigmoid", inputs=[
            OpTypePattern("BiasAdd", inputs=[
                OpTypePattern("Enter", inputs=[
                    OpTypePattern("*", name="gate_bias")
                ]),
                OpTypePattern("MatMul", name="update_reset_gate", inputs=[
                    OpTypePattern("Enter", inputs=[
                        OpTypePattern("*", name="gate_kernel")
                    ]),
                    OpTypePattern("ConcatV2|Concat", name="cell_inputs")
                ])
            ])
        ])
    ])

# TF GRUCell body: h_t = u * h_{t-1} + (1 - u) * act(W_h @ [x, r*h] + b_h).
grucell_pattern = \
    OpTypePattern("Add", name="cell_output", inputs=[
        OpTypePattern("Mul", inputs=[
            gru_split_pattern,
            OpTypePattern("Identity")
        ]),
        OpTypePattern("Mul", inputs=[
            OpTypePattern("Sub", inputs=[
                OpTypePattern("Const"),  # 1-u
                gru_split_pattern
            ]),
            OpTypePattern("*", name="optional_activation", inputs=[
                OpTypePattern("BiasAdd", inputs=[
                    OpTypePattern("Enter", inputs=[
                        OpTypePattern("*", name="hidden_bias")
                    ]),
                    OpTypePattern("MatMul", inputs=[
                        OpTypePattern("Enter", inputs=[
                            OpTypePattern("*", name="hidden_kernel")
                        ]),
                        OpTypePattern("ConcatV2|Concat")
                    ])
                ])
            ])
        ])
    ])

# CudnnCompatibleGRUCell: differs from GRUCell in that the reset gate is
# applied after the hidden-state matmul (separate state/input kernels).
cudnn_compatible_grucell_pattern = \
    OpTypePattern("Add", name="cell_output", inputs=[
        OpTypePattern("Mul", inputs=[
            OpTypePattern("Sub", inputs=[
                OpTypePattern("Const"),  # 1-u
                gru_split_pattern
            ]),
            OpTypePattern("*", name="optional_activation", inputs=[
                OpTypePattern("Add", inputs=[
                    OpTypePattern("Mul", inputs=[
                        gru_split_pattern,
                        OpTypePattern("BiasAdd", inputs=[
                            OpTypePattern("Enter", inputs=[
                                OpTypePattern("*", name="hidden_state_bias")
                            ]),
                            OpTypePattern("MatMul", inputs=[
                                OpTypePattern("Enter", inputs=[
                                    OpTypePattern("*", name="hidden_state_kernel"),
                                ]),
                                OpTypePattern("Identity")
                            ])
                        ])
                    ]),
                    OpTypePattern("BiasAdd", inputs=[
                        OpTypePattern("Enter", inputs=[
                            OpTypePattern("*", name="hidden_input_bias")
                        ]),
                        OpTypePattern("MatMul", inputs=[
                            OpTypePattern("Enter", inputs=[
                                OpTypePattern("*", name="hidden_input_kernel"),
                            ]),
                            OpTypePattern("*")
                        ])
                    ])
                ])
            ])
        ]),
        OpTypePattern("Mul", inputs=[
            gru_split_pattern,
            OpTypePattern("Identity")
        ])
    ])

# Fused GRUBlockCell op with weights fed through Enter (inside a loop).
grublockcell_pattern0 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[
    OpTypePattern("*"),
    OpTypePattern("*"),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="gate_kernel")
    ]),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="hidden_kernel")
    ]),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="gate_bias")
    ]),
    OpTypePattern("Enter", inputs=[
        OpTypePattern("*", name="hidden_bias")
    ])
])

# Fused GRUBlockCell op with constant-folded weights (no Enter wrappers).
grublockcell_pattern1 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[
    OpTypePattern("*"),
    OpTypePattern("*"),
    OpTypePattern("Const", name="gate_kernel"),
    OpTypePattern("Const", name="hidden_kernel"),
    OpTypePattern("Const", name="gate_bias"),
    OpTypePattern("Const", name="hidden_bias")
])
# Fused LSTMBlockCell op; Pi/Pf/Po are the optional peephole weights.
lstmblockcell_pattern = \
    OpTypePattern("LSTMBlockCell", name="lstm_block_cell", inputs=[
        OpTypePattern("*"),
        OpTypePattern("*"),
        OpTypePattern("*"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="cell_kernel")
        ]),
        OpTypePattern("*", name="Pi"),
        OpTypePattern("*", name="Pf"),
        OpTypePattern("*", name="Po"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="cell_bias")
        ])
    ])

# Sequence-length masking (Select over time >= seq_len), seq_len via Enter.
seq_len_pattern0 = OpTypePattern("Select|SelectV2", inputs=[
    OpTypePattern("GreaterEqual", inputs=[
        OpTypePattern("*"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="seq_len_node")
        ])
    ]),
    OpTypePattern("*"),
    OpTypePattern("*")
])

# Same masking with a constant-folded seq_len.
seq_len_pattern1 = OpTypePattern("Select|SelectV2", inputs=[
    OpTypePattern("GreaterEqual", inputs=[
        OpTypePattern("*"),
        OpTypePattern("Const", name="seq_len_node")
    ]),
    OpTypePattern("*"),
    OpTypePattern("*")
])
class RNNUnitType(Enum):
    """Kinds of TF RNN cells this rewriter can recognize."""
    LSTMCell = 0  # TF LSTMCell and BasicLSTMCell share the same pattern
    LSTMBlockCell = 1
    GRUCell = 2
    GRUBlockCell = 3
    CudnnCompatibleGRUCell = 4
# Registry: cell type -> list of candidate graph patterns to try in order.
rnn_cell_patterns = {
    RNNUnitType.LSTMCell: [lstmcell_pattern, lstmcell_pattern_optimized],
    RNNUnitType.LSTMBlockCell: [lstmblockcell_pattern],
    RNNUnitType.GRUCell: [grucell_pattern],
    RNNUnitType.GRUBlockCell: [grublockcell_pattern0, grublockcell_pattern1],
    RNNUnitType.CudnnCompatibleGRUCell: [cudnn_compatible_grucell_pattern]
}


def get_pattern(cell_type_name):
    """Return the list of patterns registered for *cell_type_name* (a RNNUnitType)."""
    return rnn_cell_patterns[cell_type_name]
def get_rnn_scope_name(while_scope_name):
    """Derive the rnn scope from a while-loop scope name by dropping the last
    two '/'-separated components (e.g. "rnn/while/" -> "rnn/")."""
    components = while_scope_name.split('/')
    return "/".join(components[:-2]) + "/"
def parse_rnn_loop(graph, loop_properties, rnn_scope, while_context_scope):
    """check if the while loop is generated by dynamic_rnn or bidirectional_rnn

    Args:
        loop_properties: LoopProperties
        rnn_scope: rnn scope name
        while_context_scope: while loop scope name

    Returns:
        (time_var, iteration_var) on success, None if this is not a
        dynamic_rnn loop.

    check a while loop is generated by dynamic_rnn or bidirectional_rnn by
    1. some patterns in _time_step in dynamic_rnn: tensor array read, tensor array write
    2. some patterns in control_flow_ops.while_loop in dynamic_rnn:
       cond: time < loop_bound
       loop_vars: (time, output_ta, state)
       time has name called "time"
       iteration_cnt is added by control flow.

    be noted:
    1. iteration counter does not exist in tf1.4 or earlier versions
    2. if dynamic_rnn's first input is not consumed, output ta does not exist.
    """
    time_name = rnn_scope + "time"
    ta_array_name_prefix = rnn_scope + "dynamic_rnn/output_"
    iteration_counter_name = while_context_scope + "iteration_counter"

    found_time = False
    # Tri-state flag: None = no tensor array inspected yet, False = one with
    # an unexpected name was found (disqualifies the loop). The explicit
    # `is False` check below depends on this distinction.
    is_rnn_out_ta = None
    time_var = None
    iteration_var = None
    for val in loop_properties.all_variables.values():
        enter_input_node = graph.get_node_by_output(val.enter_input_id)
        if val.is_tensor_array:
            ta_name = enter_input_node.get_attr("tensor_array_name").s.decode("utf-8")
            if not ta_name.startswith(ta_array_name_prefix):
                is_rnn_out_ta = False
        elif enter_input_node.name == time_name:
            found_time = True
            time_var = val
        elif enter_input_node.name == iteration_counter_name:
            iteration_var = val

    if not found_time or is_rnn_out_ta is False:
        logger.debug("this should not be a dynamic_rnn loop, found_time: %s, is_rnn_out_ta: %s",
                     found_time, is_rnn_out_ta)
        return None

    if not loop_properties.tensor_array_inputs:
        logger.debug("this should not be a dynamic_rnn loop, no ta input is found")
        return None
    return time_var, iteration_var
def get_weights_from_const_node(g, node):
    """Resolve *node* to a Const (skipping Identity wrappers) and return its
    value as a numpy array cast to the node's declared dtype, or None when
    the producer is not a constant."""
    current = node
    # In a non-const-folded graph, Identity nodes may sit between the cell
    # and its weights; walk through them.
    while current.type == 'Identity':
        current = current.inputs[0]

    if not current or current.type != 'Const':
        logger.debug("weight node seems not to be Const, skip, node name is %s", current.name)
        return None

    value = current.get_tensor_value(as_list=False)
    dtype = utils.map_onnx_to_numpy_type(g.get_dtype(current.output[0]))
    value = value.astype(dtype)
    logger.debug("found weights %s", current.name)
    return value
######################################################
#### Utilities for bidirectional rnn #######
######################################################
class ONNX_RNN_TYPE(Enum):
    """ONNX recurrent op kinds handled by the bidirectional-rnn utilities."""
    GRU = 0
    LSTM = 1
# Maps the internal rnn enum to the ONNX op type string.
onnx_rnn_type_mapping = {
    ONNX_RNN_TYPE.GRU: "GRU",
    ONNX_RNN_TYPE.LSTM: "LSTM"
}

# Attributes that must match between a forward and a backward rnn node for
# them to be merged into one bidirectional node (see belong_to_birnn).
# Consistency fix: the GRU entry was a set literal while the LSTM entry was
# a list; both are only iterated, so they are normalized to lists.
onnx_rnn_attr_mapping = {
    ONNX_RNN_TYPE.LSTM: [
        "clip",
        "hidden_size",
        "input_forget"
    ],
    ONNX_RNN_TYPE.GRU: [
        "clip",
        "hidden_size",
        "linear_before_reset"
    ]
}

# Index of the sequence-length input on the ONNX LSTM/GRU nodes.
onnx_rnn_seq_len_index_mapping = {
    ONNX_RNN_TYPE.LSTM: 4,
    ONNX_RNN_TYPE.GRU: 4
}
def find_bidirectional_rnns(g, ops, rnn_type):
    """
    Find possible bidirectional rnns, return: list of tuple,
    Format of tuple is (fw onnx rnn node, bw onnx rnn node).
    """
    # shared-input id -> rnn nodes consuming it, kept separately for the
    # forward and backward (reversed-input) directions.
    fw_rnns = defaultdict(list)
    bw_rnns = defaultdict(list)
    for n in g.get_nodes():
        if n.type != onnx_rnn_type_mapping[rnn_type]:
            continue

        input_id = n.input[0]
        temp = n.inputs[0]
        is_bw = False
        # Walk back through an optional Transpose; a tf reverse op feeding
        # the rnn marks it as the backward direction.
        if temp.type == "Transpose":
            input_id = temp.input[0]
            temp = temp.inputs[0]

        if utils.is_tf_reverse_op(temp):
            input_id = temp.input[0]
            is_bw = True

        if is_bw:
            # if output 0 is consumed and there is no reverse after the 1st output.
            # it's not backward rnn.
            if g.find_output_consumers(n.output[0]) and not get_reverse_nodes_after_y_output(g, n):
                logger.warning("rnn %s following Reverse op isn't the part of bi-rnn.", n.name)
                continue

            logger.debug("find bw rnn %s", input_id)
            bw_rnns[input_id].append(n)
        else:
            logger.debug("find fw rnn %s", input_id)
            fw_rnns[input_id].append(n)

    # fw_rnn and bw_rnn must share the same input
    birnn_input = list(set(fw_rnns.keys()).intersection(bw_rnns.keys()))
    bi_rnns = []
    matched_rnn = []
    for inp in birnn_input:
        fw_rnn = fw_rnns[inp]
        bw_rnn = bw_rnns[inp]
        # it's possible several bi-rnns share the same input
        for fw_n in fw_rnn:
            for bw_n in bw_rnn:
                # each node may participate in at most one bi-rnn pair
                if belong_to_birnn(g, fw_n, bw_n, rnn_type) and \
                        not fw_n in matched_rnn and not bw_n in matched_rnn:
                    logger.debug("found birnn comprising %s and %s", fw_n.name, bw_n.name)
                    bi_rnns.append((fw_n, bw_n))
                    matched_rnn.extend([fw_n, bw_n])
    return bi_rnns
def belong_to_birnn(g, fw_rnn, bw_rnn, rnn_type):
    """
    Check whether fw_rnn and bw_rnn are part of the same birnn.
    If fw_rnn and bw_rnn have the same attributes except those related to activation
    and share the same seq_len, they are able to be merged into a bi-rnn.
    """
    logger.debug("check whether %s and %s are part of birnn", fw_rnn.name, bw_rnn.name)
    # All merge-relevant attributes must agree between the two directions.
    for attr_name in onnx_rnn_attr_mapping[rnn_type]:
        fw_attr = fw_rnn.get_attr_value(attr_name)
        bw_attr = bw_rnn.get_attr_value(attr_name)
        if fw_attr != bw_attr:
            logger.debug(
                "fw_rnn and bw_rnn mismatch at attr %s: %s, %s",
                attr_name, fw_attr, bw_attr
            )
            return False

    # The sequence-length inputs must also resolve to the same value.
    seq_idx = onnx_rnn_seq_len_index_mapping[rnn_type]
    fw_seq_len = fw_rnn.input[seq_idx]
    bw_seq_len = bw_rnn.input[seq_idx]
    if utils.have_same_inference_value(g, fw_seq_len, bw_seq_len):
        return True

    logger.debug(
        "fw_rnn and bw_rnn have different seq_len input: %s, %s",
        fw_seq_len, bw_seq_len
    )
    return False
def get_reverse_nodes_after_y_output(g, rnn_bw):
    """Return the tf reverse ops that undo the input reversal on the backward
    rnn's Y output; the expected consumer chain is
    Y -> Squeeze [-> Transpose] -> Reverse(s). Returns [] for any other shape
    of consumer graph."""
    bw_consumers = g.find_output_consumers(rnn_bw.output[0])

    # todo: figure out a better way to remove reverse op
    squeeze_nodes = [c for c in bw_consumers if c.type == "Squeeze"]
    s_cnt = len(squeeze_nodes)
    if s_cnt == 1:
        s = squeeze_nodes[0]
        trans_nodes = g.find_output_consumers(s.output[0])
        if len(trans_nodes) == 1:
            if trans_nodes[0].type == "Transpose":
                reverse_nodes = g.find_output_consumers(trans_nodes[0].output[0])
            elif utils.is_tf_reverse_op(trans_nodes[0]):
                reverse_nodes = trans_nodes
            else:
                logger.debug("not found reverse op, unexpected")
                return []

            # every consumer past the (optional) Transpose must be a reverse
            are_all_reverse = all([utils.is_tf_reverse_op(r_op) for r_op in reverse_nodes])
            if are_all_reverse:
                return reverse_nodes

            logger.debug("bw y output is used followed by reverse node")
            return []

        logger.debug("unexpected number of transpose after RNN 1st output:%s", s_cnt)
        return []

    logger.debug("unexpected number of squeeze following RNN 1st output:%s", s_cnt)
    return []
def get_np_val_for_const(g, node, input_index):
    """Return the raw (non-list) tensor value of the const feeding *node*'s
    input at *input_index*. The graph argument is unused but kept for the
    existing call sites."""
    const_input = node.inputs[input_index]
    return const_input.get_tensor_value(as_list=False)
def check_const(g, input_id):
    """If *input_id* is produced by a Const node, return (True, value);
    otherwise return (None, None)."""
    producer = g.get_node_by_output(input_id)
    if producer is None or not producer.is_const():
        return (None, None)
    return (True, producer.get_tensor_value(as_list=False))
def process_single_init_node(g, fw_init_input_id, bw_init_input_id, to_append):
    """Merge the forward/backward initial-state inputs into one tensor along
    axis 0 (the num_directions axis). Returns the merged node; non-const
    merges are also appended to *to_append*."""
    fw_is_const, fw_val = check_const(g, fw_init_input_id)
    bw_is_const, bw_val = check_const(g, bw_init_input_id)
    if fw_is_const and bw_is_const:
        # Both constant: fold the concat at conversion time.
        merged_val = np.concatenate((fw_val, bw_val), axis=0)
        init_node = g.make_const(utils.make_name("initial"), merged_val, skip_conversion=True)
    else:
        # Otherwise emit a runtime Concat and report it to the caller.
        init_node = g.make_node("Concat", [fw_init_input_id, bw_init_input_id], attr={"axis": 0})
        to_append.append(init_node)

    return init_node
def slice_birnn_for_original_rnn_consumers(g, rnn_fw, rnn_bw, bi_rnn, rnn_output_index, all_nodes, to_remove):
    """Reroute consumers of the original fw/bw rnn outputs to Slices of the
    merged bidirectional node's corresponding output.

    rnn_output_index 0 is Y (slice the num_directions axis 1); 1 and 2 are
    the state outputs (slice axis 0). New nodes are appended to *all_nodes*
    and obsolete reverse ops to *to_remove*.
    """
    fw_consumers = g.find_output_consumers(rnn_fw.output[rnn_output_index])
    bw_consumers = g.find_output_consumers(rnn_bw.output[rnn_output_index])

    if not fw_consumers and not bw_consumers:
        return

    if rnn_output_index == 0:
        axis = 1
        # remove reverse op for rnn_bw
        reverse_nodes = get_reverse_nodes_after_y_output(g, rnn_bw)

        for r_op in reverse_nodes:
            logger.debug("remove reverse op %s", r_op.name)
            g.replace_all_inputs(r_op.output[0], r_op.input[0], ops=all_nodes)
            to_remove.append(r_op.name)
    elif rnn_output_index in [1, 2]:
        axis = 0
    else:
        raise ValueError("rnn only should has 3 outputs.")

    if fw_consumers:
        # forward direction lives at index 0 of the sliced axis
        attr = {"axes": [axis], "starts": [0], "ends": [1]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_fw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_fw))
        g.replace_all_inputs(rnn_fw.output[rnn_output_index], slice_node_fw, ops=fw_consumers)

    if bw_consumers:
        # backward direction lives at index 1 of the sliced axis
        attr = {"axes": [axis], "starts": [1], "ends": [2]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_bw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_bw))
        g.replace_all_inputs(rnn_bw.output[rnn_output_index], slice_node_bw, ops=bw_consumers)
def remove_reverse_in_bw_input(g, bw_rnn_input_x, rnn_type):
    """Remove the now-useless tf reverse op feeding the backward rnn's input
    once no rnn node consumes it anymore; raises when it is still in use."""
    old_x_consumers = g.find_output_consumers(bw_rnn_input_x)
    # the transpose/reverse here must be followed by RNN if it is still useful.
    # this is guaranteed by dynamic_rnn logic.
    old_x_has_rnn_as_consumer = [n for n in old_x_consumers if n.type == onnx_rnn_type_mapping[rnn_type]]
    if not old_x_has_rnn_as_consumer:
        logger.debug("plan to remove useless reverse op in bw")
        reverse_node = g.get_node_by_output(bw_rnn_input_x)

        # the reverse may be wrapped by a Transpose; step past it
        if reverse_node.type == "Transpose":
            reverse_node = reverse_node.inputs[0]

        # bypass the reverse, then drop it
        g.replace_all_inputs(reverse_node.output[0], reverse_node.input[0])  # ops=g.get_nodes()
        g.remove_node(reverse_node.name)
    else:
        raise ValueError("Reverse is still used by RNN as input, cannot remove")
|
zzpmiracle/tensorflow-onnx | tf2onnx/optimizer/const_fold_optimizer.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""const fold Optimizer.
if op's inputs are all const then do op computation when building the graph to improve performance
for example, input of transpose node is const then we can do transpose statically instead of at runtime
"""
from .. import utils
from .optimizer_base import GraphOptimizerBase
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
# key is op_type, value is the function to compute outputs
# the schema of function is: inputs are(node, graph), output is a list of constant values.
_func_map = {}
def _register_func(op_type):
def _internal_fun(func):
_func_map[op_type] = func
return func
return _internal_fun
class ConstFoldOptimizer(GraphOptimizerBase):
    """Folds ops whose inputs are all constant into precomputed Const nodes."""

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(ConstFoldOptimizer, self).__init__()

    def _optimize(self, graph):
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        # Iterate to a fixed point: folding one node can make others foldable.
        graph_changed = True
        while graph_changed:
            graph_changed = False
            ops = graph.get_nodes()
            for op in ops:
                if self._should_skip(op):
                    continue
                if self._fold_node(op, graph):
                    graph_changed = True
                    self.graph_been_opt = True
        return graph

    @staticmethod
    def _should_skip(node):
        """Return True for nodes that must not (or need not) be folded."""
        # only support onnx official op for now, op in other domain is not supported for now
        if not utils.is_onnx_domain(node.domain):
            return True

        if node.is_const() or node.is_graph_input():
            return True

        skip_type = ["Identity"]
        if node.type in skip_type:
            return True

        return False

    def _fold_node(self, node, graph):
        """ if node's input are all const and it's not graph's output then it can be fold.
            if node can be fold True will be return indicating that graph is changed
        """
        if self._all_inputs_are_const(node.inputs) and not self._is_graph_output(node, graph):
            process_func = _func_map.get(node.type, None)
            if process_func:
                const_outputs = process_func(node, graph)
                self._replace_node_with_const(node, graph, const_outputs)
                return True
            self.logger.debug("need to add function to fold op %s whose op_type is %s", node.name, node.type)
        return False

    @staticmethod
    def _all_inputs_are_const(nodes):
        return all(node.is_const() for node in nodes if node)

    @staticmethod
    def _is_graph_output(node, graph):
        node_out_set = set(node.output)
        graph_out_set = set(graph.outputs)
        return node_out_set.intersection(graph_out_set)

    @staticmethod
    def _replace_node_with_const(node, graph, vals):
        """Swap *node* for one Const per output, preserving dtype and shape."""
        utils.make_sure(len(node.output) == len(vals), "length of node outputs and const vals should be same")
        for old_input, val in zip(node.output, vals):
            const_node = graph.make_const(utils.make_name("const_fold_opt"), val)
            graph.set_dtype(const_node.output[0], utils.map_numpy_to_onnx_dtype(val.dtype))
            graph.set_shape(const_node.output[0], val.shape)
            graph.replace_all_inputs(old_input, const_node.output[0])  # ops=graph.get_nodes()
        graph.remove_node(node.name)

    @staticmethod
    @_register_func("Cast")
    def _fold_cast(node, graph):
        """Apply the Cast at conversion time via numpy astype."""
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        np_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.get_attr("to").i]
        const_val_after_cast = const_val.astype(np_dtype)
        return [const_val_after_cast]

    @staticmethod
    @_register_func("Transpose")
    def _fold_transpose(node, graph) -> list:
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        perm_attr = node.get_attr("perm")
        # missing perm attribute means numpy's default (reversed axes)
        perm = perm_attr.ints if perm_attr else None
        const_val_after_trans = const_val.transpose(perm)
        return [const_val_after_trans]

    @staticmethod
    @_register_func("Reshape")
    def _fold_reshape(node, graph):
        const_val_data = node.inputs[0].get_tensor_value(as_list=False)
        const_val_shape = node.inputs[1].get_tensor_value(as_list=False)
        const_val_after_trans = const_val_data.reshape(const_val_shape)
        return [const_val_after_trans]

    @staticmethod
    @_register_func("Unsqueeze")
    def _fold_unsqueeze(node, graph):
        """
        numpy expand_dims only supports to unsqueeze one dim one time, so reshape is used to simplify the logic
        """
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        axes = list(node.get_attr("axes").ints)
        utils.make_sure(all(axis >= 0 for axis in axes), "onnx spec says it only supports positive axis")
        shape_in = const_val.shape
        dims_out = len(shape_in) + len(axes)
        # calculate the shape of output according to onnx Unsqueeze's spec
        # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Unsqueeze
        shape_in = iter(shape_in)
        shape_out = [None] * dims_out
        for ind in axes:
            shape_out[ind] = 1
        for ind, val in enumerate(shape_out):
            if val is None:
                shape_out[ind] = next(shape_in)
        const_val_after_unsqueeze = const_val.reshape(shape_out)
        return [const_val_after_unsqueeze]
|
AbrahamOsco/modelostp1 | ResolucionTP2.py | #Lee los archivos y devuelve dos diccionarios uno para las prendas incompatibles
#Y otro diccionario para el tiempo que demora cada prenda
import time
class Prenda:
    """A garment — a vertex of the incompatibility graph."""

    def __init__(self, nroPrenda):
        self.nro = nroPrenda       # 1-based garment number
        self.tiempoLavado = 0      # wash duration
        self.prendasIncomp = []    # numbers of incompatible garments
        self.estaLavada = False    # already assigned to a wash?

    def definirTiempo(self, unTiempo):
        """Set this garment's wash time."""
        self.tiempoLavado = unTiempo

    def addIncompatibilidad(self, prendaIncompatible):
        """Record an incompatible garment number, ignoring duplicates."""
        if prendaIncompatible not in self.prendasIncomp:
            self.prendasIncomp.append(prendaIncompatible)

    def getPrendasIncomp(self):
        return self.prendasIncomp

    def getNro(self):
        return self.nro

    def cantidadIncom(self):
        """Degree of the vertex: how many incompatibilities it has."""
        return len(self.prendasIncomp)

    def getTiempo(self):
        return self.tiempoLavado

    def lavarPrenda(self):
        """Mark the garment as assigned to a wash (coloured)."""
        self.estaLavada = True

    def prendaLavada(self):
        return self.estaLavada
class Lavado:
    """A wash — a colour holding a set of mutually compatible garments."""

    def __init__(self, nroDeLavado):
        self.nroLavado = nroDeLavado  # wash number (colour id)
        self.prendas = []             # garments assigned to this wash

    def agregarPrendaAlLavado(self, unaPrenda):
        """Add the garment if it is absent and compatible with this wash."""
        if unaPrenda not in self.prendas and self.sePuedeAgregarPrenda(unaPrenda):
            self.prendas.append(unaPrenda)

    def sePuedeAgregarPrenda(self, unaPrenda):
        """True when no garment already here lists unaPrenda as incompatible."""
        return all(unaPrenda.getNro() not in prenda.getPrendasIncomp()
                   for prenda in self.prendas)

    def getListaPrendas(self):
        return self.prendas

    def getNroLavado(self):
        return self.nroLavado

    def estaNroPrendaEnLavado(self, nro):
        """True when a garment numbered *nro* is already in this wash."""
        return any(prenda.getNro() == nro for prenda in self.prendas)
def leerArchivos():
    """Parse "tercer_problema.txt" (DIMACS-like format) into a list of Prenda.

    Line formats: 'p ... <num_garments>' sizes the list, 'e <a> <b>' adds an
    incompatibility edge, 'n <a> <t>' sets garment a's wash time.
    """
    listaIncomp = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open("tercer_problema.txt", "r") as archivo:
        for linea in archivo:
            campos = linea.strip().split()
            if not campos:
                continue  # tolerate blank lines (the original crashed on them)
            if campos[0] == 'p':
                inicializarLista(campos, listaIncomp)
            elif campos[0] == 'e':
                agregarIncomp(listaIncomp, int(campos[1]), int(campos[2]))
            elif campos[0] == 'n':
                listaIncomp[int(campos[1]) - 1].definirTiempo(int(campos[2]))
    return listaIncomp
def agregarIncomp(listaIncomp, valor1, valor2):
    """Record the incompatibility edge (valor1, valor2) on both endpoints."""
    listaIncomp[valor1 - 1].addIncompatibilidad(valor2)
    listaIncomp[valor2 - 1].addIncompatibilidad(valor1)
def inicializarLista(linea, listaIncomp):
    """Append Prenda objects numbered 1..N, N being field 2 of the 'p' line."""
    cantidad = int(linea[2])
    for numero in range(1, cantidad + 1):
        listaIncomp.append(Prenda(numero))
# Step 1 of the greedy colouring: order the garments (vertices) by decreasing
# degree, i.e. by how many incompatibilities (incident edges) each one has.
def listaPrendasConMasIncomp(listaIncomp):
    """Return the garments sorted by number of incompatibilities, descending."""
    return sorted(listaIncomp, key=lambda prenda: prenda.cantidadIncom(), reverse=True)
# The "colour degree" of a vertex: the number of distinct washes (colours)
# already used by its incompatible neighbours.
def obtenerGradoColor(prendaEsp, lavados):
    """Count the distinct washes containing neighbours of *prendaEsp*."""
    nros_usados = set()
    for nro_vecino in prendaEsp.getPrendasIncomp():
        for lavado in lavados:
            if lavado.estaNroPrendaEnLavado(nro_vecino):
                nros_usados.add(lavado.getNroLavado())
    return len(nros_usados)
def obtenerGradoColorMax(listaPrendas, lavados):
    """Step 3: pick the garment with maximum colour degree; ties are broken
    by the longest wash time."""
    mejor_grado = 0
    candidatas = []  # the colour degree can repeat; collect all ties
    for prenda in listaPrendas:
        grado = obtenerGradoColor(prenda, lavados)
        if grado > mejor_grado:
            # strictly better: previous candidates are discarded
            mejor_grado = grado
            candidatas = [prenda]
        elif grado == mejor_grado:
            candidatas.append(prenda)
    # among the ties, take the garment that takes longest to wash
    return max(candidatas, key=lambda p: p.getTiempo())
def asignarLavados(listCantiIncomp):
    """Greedy (DSATUR-like) colouring: assign every garment to a wash so that
    no two incompatible garments share one.

    ``listCantiIncomp`` must be sorted by decreasing degree and is consumed
    (emptied) in the process. Returns the list of Lavado objects.
    Fixes: removed a leftover debug ``print``; added an early ``break`` once
    the garment is placed (behaviour-equivalent — later iterations were
    no-ops thanks to the prendaLavada() guard).
    """
    nroLavado = 1
    lavados = []
    # Step 2: colour a vertex of maximum degree with colour (wash) 1.
    prendaIni = listCantiIncomp[0]
    lavados.append(Lavado(nroLavado))
    lavados[0].agregarPrendaAlLavado(prendaIni)
    prendaIni.lavarPrenda()
    nroLavado += 1
    listCantiIncomp.pop(0)
    while len(listCantiIncomp) > 0:
        # Step 3: pick an uncoloured vertex with maximum colour degree.
        prendaSgt = obtenerGradoColorMax(listCantiIncomp, lavados)
        # Step 4: colour it with the lowest-numbered compatible wash.
        for lavado in lavados:
            if lavado.sePuedeAgregarPrenda(prendaSgt) and not prendaSgt.prendaLavada():
                lavado.agregarPrendaAlLavado(prendaSgt)
                prendaSgt.lavarPrenda()
                listCantiIncomp.remove(prendaSgt)
                break
        if not prendaSgt.prendaLavada():
            # No existing wash is compatible: open a new one.
            lavadoActual = Lavado(nroLavado)
            lavadoActual.agregarPrendaAlLavado(prendaSgt)
            lavados.append(lavadoActual)
            prendaSgt.lavarPrenda()
            nroLavado += 1
            listCantiIncomp.remove(prendaSgt)
        # Step 5: repeat until every garment is coloured.
    return lavados
def escribirArchivo(listaLavados):
    """Write one "garment-number wash-number" line per assigned garment.

    Uses a context manager so the file is closed even if writing fails
    (the original open/close pair leaked the handle on exception).
    """
    with open("respuestasTP3Inicial.txt", "w") as archivo:
        for lavado in listaLavados:
            for prenda in lavado.getListaPrendas():
                archivo.write(str(prenda.getNro()) + " " + str(lavado.getNroLavado()) + "\n")
def main():
    """Entry point: read input, run the greedy colouring, write the result,
    and print the elapsed wall-clock time in seconds."""
    inicio = time.time()
    listaIncomp = leerArchivos()
    listaCantiIncomp = listaPrendasConMasIncomp(listaIncomp)
    listaLavados = asignarLavados(listaCantiIncomp)
    escribirArchivo(listaLavados)
    fin = time.time()
    print(fin - inicio)
if __name__ == "__main__":
    # Guard the entry point so the module can be imported without running.
    main()
|
izapolsk/test-task | lib/widgets/sweet_alert.py | <filename>lib/widgets/sweet_alert.py
from widgetastic.widget.base import GenericLocatorWidget, View
from widgetastic.exceptions import NoSuchElementException
class SweetAlert(View):
    """Sweet alert (swal) modal dialog.

    ROOT matches the overlay hosting the modal; TITLE/TEXT are located
    relative to it.
    """
    ROOT = '//div[contains(@class, "swal-overlay") and .//div[contains(@class, "swal-modal") and @role="dialog"]]'
    TITLE = './/div[contains(@class, "swal-title")]'
    TEXT = './/div[contains(@class, "swal-text")]'
    @property
    def is_displayed(self):
        """ Is the modal currently open? """
        try:
            # swal adds this modifier class to the overlay while shown
            return "swal-overlay--show-modal" in self.browser.classes(self)
        except NoSuchElementException:
            # overlay not in the DOM at all -> not displayed
            return False
    @property
    def title(self):
        """Text of the swal title element."""
        return self.browser.text(self.TITLE)
    @property
    def text(self):
        """Text of the swal body element."""
        return self.browser.text(self.TEXT)
    @View.nested
    class footer(View):  # noqa
        """ The footer of the modal """
        ROOT = './/div[@class="swal-footer"]'
        accept = GenericLocatorWidget(locator='.//button[contains(@class, "swal-button--confirm")]')
    def accept(self):
        """ Submit/Save/Accept/Delete for the modal."""
        self.footer.accept.click()
|
izapolsk/test-task | lib/views/base.py | from widgetastic.widget.base import View
from widgetastic_bootstrap.button import Button
from widgetastic.widget.input import TextInput
from widgetastic.widget.text import Text
from widgetastic_patternfly import BootstrapNav
from lib.widgets.account_dropdown import AccountDropdown
class LoginView(View):
    """The login page: email/password form plus the Login button."""
    title = Text(locator='//h2[normalize-space(.)="Login"]')
    email = TextInput(locator='//input[@type="email"]')
    password = TextInput(locator='//input[@type="password"]')
    login = Button('Login')
    def log_in(self, email, password):
        """Fill the credentials and click Login (does not verify success)."""
        self.fill({
            'email': email,
            'password': password,
        })
        self.login.click()
    @property
    def logged_in(self):
        # deliberately unimplemented, see the message below
        raise NotImplementedError("This check is going to be implemented later due to limited time")
    @property
    def is_displayed(self):
        # the page is considered open once its "Login" heading renders
        return self.title.is_displayed
class BaseLoggedInView(View):
    """Base view for any page reachable only after a successful login."""
    # todo: turn into navbar widget
    @View.nested
    class navbar(View):
        account = AccountDropdown(id="account-dropdown")
        # merge this with dropdown above
        navigation = BootstrapNav('.//div/ul')
    @property
    def is_displayed(self):
        # the account dropdown only renders for authenticated sessions
        return self.navbar.account.is_displayed
|
izapolsk/test-task | lib/widgets/html_dropdown.py | <filename>lib/widgets/html_dropdown.py<gh_stars>0
from selenium.webdriver.support.ui import Select
from widgetastic.widget import Widget
from widgetastic.xpath import quote
from widgetastic_bootstrap.dropdown import (DropdownItemDisabled, DropdownItemNotFound, NoSuchElementException)
class HTMLDropdown(Widget):
    """Represents a plain HTML ``<select>`` dropdown.

    Args:
        locator: locator of the ``<select>`` element
    """
    OPTIONS_LOCATOR = "./option"
    OPTION_LOCATOR = "./option[normalize-space(.)={}]"
    def __init__(self, parent, locator, logger=None):
        Widget.__init__(self, parent, logger=logger)
        self.locator = locator
    def __locator__(self):
        return self.locator
    @property
    def items(self):
        """Returns a list of all dropdown items as strings."""
        return [self.browser.text(el) for el in self.browser.elements(self.OPTIONS_LOCATOR)]
    def has_item(self, item):
        """Returns whether the item exists.

        Args:
            item: item name

        Returns:
            Boolean - True if present, False if not.
        """
        return item in self.items
    def item_element(self, item):
        """Returns a WebElement for given item name.

        Raises:
            DropdownItemNotFound: when no option matches *item*.
        """
        try:
            return self.browser.element(self.OPTION_LOCATOR.format(quote(item)))
        except NoSuchElementException:
            try:
                items = self.items
            except NoSuchElementException:
                items = []
            if items:
                items_string = "These items are present: {}".format("; ".join(items))
            else:
                items_string = "The dropdown is probably not present"
            raise DropdownItemNotFound("Item {!r} not found. {}".format(item, items_string))
    def item_enabled(self, item):
        """Returns whether the given item is enabled.

        Args:
            item: Name of the item.

        Returns:
            Boolean - True if enabled, False if not.
        """
        el = self.item_element(item)
        # NOTE(review): assumes a disabled option's 'disabled' attribute reads
        # as the string 'true' -- confirm against the webdriver in use.
        return self.browser.get_attribute('disabled', el) != 'true'
    def item_select(self, item):
        """Opens the dropdown and selects the desired item.

        Args:
            item: Item to be selected

        Raises:
            DropdownItemDisabled
        """
        self.logger.info("Selecting %r", item)
        if not self.item_enabled(item):
            raise DropdownItemDisabled(
                'Item "{}" of dropdown "{}" is disabled\n'
                'The following items are available: {}'
                .format(item, self.locator, ";".join(self.items)))
        select = Select(self.browser.element(self))
        select.select_by_visible_text(item)
    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self.locator)
    @property
    def selected_item(self):
        """Currently selected option (a WebElement, not its visible text)."""
        select = Select(self.browser.element(self))
        return select.first_selected_option
    def read(self):
        """Widgetastic read: returns the selected option WebElement."""
        return self.selected_item
    def fill(self, value):
        """Widgetastic fill: select *value* (option text) unless it is
        already selected; returns True when a change was made.
        """
        # BUGFIX: `selected_item` returns a WebElement, so the original
        # comparison `value == current_value` was always False and fill()
        # re-selected the option on every call. Compare the visible text.
        if value == self.browser.text(self.selected_item):
            return False
        self.item_select(value)
        return True
|
izapolsk/test-task | lib/widgets/bootstrap_modal.py | from widgetastic.exceptions import NoSuchElementException
from widgetastic.widget import Text, View
from widgetastic_patternfly import Modal, Button
class BootstrapModal(Modal):
    """Bootstrap 4 modal; overrides open-state detection and dismissal."""
    @property
    def is_displayed(self):
        """ Is the modal currently open? """
        try:
            # Bootstrap toggles the "show" class on the open modal element
            return "show" in self.browser.classes(self)
        except NoSuchElementException:
            return False
    def dismiss(self):
        """ Cancel the modal"""
        self.header.close.click()
    @View.nested
    class header(View):  # noqa
        """ The header of the modal """
        ROOT = './/div[contains(@class, "modal-header")]'
        close = Text(locator='.//button[@class="close"]')
        title = Text(locator='.//div[contains(@class,"modal-title")]')
    @View.nested
    class body(View):  # noqa
        """ The body of the modal """
        ROOT = './/div[@class="modal-body"]'
        description = Text(locator='.//p')
    @View.nested
    class footer(View):  # noqa
        """ The footer of the modal """
        ROOT = './/div[contains(@class, "modal-footer")]'
        accept = Button(classes=[Button.SUCCESS])
|
izapolsk/test-task | lib/widgets/card.py | from widgetastic.utils import ParametrizedLocator
from widgetastic.widget.base import GenericLocatorWidget
from widgetastic.widget.base import View
from widgetastic_patternfly import Button
from wait_for import wait_for
class Card(View):
    """A single retro-board card, addressed by its data-card id."""
    ROOT = ParametrizedLocator('//div[contains(@class, "card") and boolean(@data-card) and @data-card={@id|quote}]')
    # NOTE(review): absolute '//' locator -- Card.all() matches cards on the
    # whole page, not only within the parent column view. Confirm intended.
    ALL_CARDS = '//div[contains(@class, "card") and boolean(@data-card)]'
    TITLE = './/h6'
    DESCRIPTION = './/div[contains(@class, "card-body")]/p[1]'
    like_button = GenericLocatorWidget(locator='.//button[./*[name()="svg" and @data-icon="thumbs-up"]]')
    delete_button = Button('Delete')
    def __init__(self, parent, id, logger=None):
        View.__init__(self, parent, logger=logger)
        # value of the card's data-card attribute
        self.id = id
    @property
    def title(self):
        return self.browser.text(self.TITLE)
    @property
    def description(self):
        return self.browser.text(self.DESCRIPTION)
    @property
    def liked(self):
        # the like button's text is the like counter; non-zero means liked
        return bool(int(self.browser.text(self.like_button)))
    def wait_liked(self, liked, timeout='5s'):
        """Wait until the liked state equals *liked*; returns True on
        success (wait_for raises on timeout)."""
        return wait_for(lambda: self.like_button.is_displayed and self.liked == liked, timeout=timeout, delay=1)[0]
    def like(self):
        """Like the card; returns True when a click was actually performed."""
        if not self.liked:
            self.like_button.click()
            return True
        else:
            return False
    def unlike(self):
        """Remove the like; returns True when a click was actually performed."""
        if self.liked:
            self.like_button.click()
            return True
        else:
            return False
    def delete(self):
        """Click Delete (opens the confirmation modal)."""
        self.delete_button.click()
    @classmethod
    def all(cls, parent):
        """Return a Card view for every data-card element found."""
        cards = []
        for el in parent.browser.elements(cls.ALL_CARDS):
            card_id = parent.browser.get_attribute('data-card', el)
            cards.append(cls(parent, id=card_id))
        return cards
izapolsk/test-task | lib/views/board.py | <reponame>izapolsk/test-task
from widgetastic.widget import TextInput, View, GenericLocatorWidget
from widgetastic.utils import WaitFillViewStrategy
from widgetastic_bootstrap.button import Button
from lib.views.base import BaseLoggedInView
from lib.widgets.card import Card
from lib.widgets.html_dropdown import HTMLDropdown
from lib.widgets.sweet_alert import SweetAlert
class CreateBoardView(BaseLoggedInView):
    """The board-creation page (name + owner form)."""
    # wait for each widget to be displayed before filling it
    fill_strategy = WaitFillViewStrategy()
    session_name = TextInput(locator='//input[@type="text" and @placeholder="Session Name"]')
    owner = HTMLDropdown(locator='//select[@class="custom-select"]')
    create_board = Button('Create Board')
    alert = SweetAlert()
class MainBoardView(BaseLoggedInView):
    """The retro board page with its three card columns."""
    @property
    def is_displayed(self):
        # the board is considered loaded once all three columns render
        return all((super(MainBoardView, self).is_displayed,
                    self.body.went_well.is_displayed,
                    self.body.went_unwell.is_displayed,
                    self.body.action_points.is_displayed))
    @View.nested
    class sidebar(View):  # noqa
        pass
    @View.nested
    class body(View):  # noqa
        @View.nested
        class went_well(View):  # noqa
            """The "Went well" column."""
            ROOT = './/h5[./span[normalize-space(.)="Went well"]]'
            add = GenericLocatorWidget(locator='.//button[contains(@class, "text-success") and '
                                               './*[name()="svg" and @data-icon="plus-circle"]]')
            @property
            def cards(self):
                return Card.all(self)
        @View.nested
        class went_unwell(View):  # noqa
            """The "Didn't go well" column."""
            ROOT = './/h5[./span[normalize-space(.)="Didn\'t go well"]]'
            add = GenericLocatorWidget(locator='.//button[contains(@class, "text-danger") and '
                                               './*[name()="svg" and @data-icon="plus-circle"]]')
            @property
            def cards(self):
                return Card.all(self)
        @View.nested
        class action_points(View):  # noqa
            """The "Action items" column."""
            ROOT = './/h5[./span[normalize-space(.)="Action items"]]'
            add = GenericLocatorWidget(locator='.//button[contains(@class, "text-primary") and '
                                               './*[name()="svg" and @data-icon="plus-circle"]]')
            @property
            def cards(self):
                return Card.all(self)
|
izapolsk/test-task | utils/log.py | import sys
from loguru import logger
logger.remove()
logger.add(sys.stderr, level="INFO")
|
izapolsk/test-task | lib/views/modals.py | from widgetastic.widget import View
from widgetastic_patternfly import Button
from lib.widgets.bootstrap_modal import BootstrapModal
class ConfirmModal(BootstrapModal):
    """Delete-confirmation modal; its accept button is labelled "Confirm"."""
    @View.nested
    class footer(View):  # noqa
        """ The footer of the modal """
        ROOT = './/div[contains(@class, "modal-footer")]'
        accept = Button('Confirm')
izapolsk/test-task | utils/path.py | <reponame>izapolsk/test-task
import os
# todo: replace with getting path of installed module and entry points
# NOTE(review): repo_dir resolves two levels above this file (the parent of
# the checkout) while proj_dir resolves one level up -- confirm intended.
repo_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
proj_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
conf_dir = os.path.join(proj_dir, 'conf')
log_dir = os.path.join(proj_dir, 'log')
tmp_dir = os.path.join(proj_dir, 'tmp')
data_dir = os.path.join(proj_dir, 'data')
|
izapolsk/test-task | tests/smoke_test.py | import pytest
from selenium.webdriver import Chrome, ChromeOptions
from wait_for import TimedOutError, wait_for
from widgetastic.browser import Browser
from lib.views.base import LoginView, BaseLoggedInView
from lib.views.board import CreateBoardView, MainBoardView
from lib.views.card import AddCardView
from lib.views.modals import ConfirmModal
from utils.log import logger
pytestmark = [pytest.mark.smoke]
# todo: move to encrypted file or vault ?
APP_URL = 'https://sprintboards.io/auth/login'
APP_CREDS = dict(email="<EMAIL>", password='<PASSWORD>')
@pytest.fixture(scope='session')
def browser(request):
    """Session-scoped headless Chrome wrapped in a widgetastic Browser,
    already navigated to the login page (scenario step 1)."""
    # todo: replace with webdriver-kaifuku and move settings and etc to config files
    logger.info("openning chrome browser")
    chrome_options = ChromeOptions()
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--start-maximized')
    chrome_options.add_argument("--window-size=1920,1080")
    chrome_options.add_argument('--headless')
    driver = Chrome(options=chrome_options)
    logger.info('step 1. Go to https://sprintboards.io/auth/login')
    driver.get(APP_URL)
    app_browser = Browser(selenium=driver)
    # close the window when the whole session finishes
    request.addfinalizer(app_browser.close_window)
    yield app_browser
@pytest.fixture(scope='module')
def login(browser):
    """Log in with the canned credentials (scenario steps 2-4); fails the
    module's tests if the logged-in view never appears."""
    # todo: replace with navmazing navigation and test application class
    logger.info('Step 2. Type "<EMAIL>" in “Email Address” field')
    logger.info('Step 3. Type “<PASSWORD>” as password in “Password” field')
    logger.info('Step 4. Click “Login”')
    login_view = LoginView(browser)
    assert login_view.is_displayed, "Login View isn't open"
    login_view.log_in(**APP_CREDS)
    try:
        BaseLoggedInView(browser).wait_displayed()
    except TimedOutError:
        pytest.fail("User couldn't login")
@pytest.fixture(scope='module')
def board(login, browser):
    """Create a fresh board for the module (scenario steps 5-7) and verify
    the URL, the page title and the "Created" confirmation pop-up."""
    logged_in_view = BaseLoggedInView(browser)
    logger.info('Step 5. Click “CREATE BOARD”')
    logged_in_view.navigation.select('Create Board')
    boards_view = CreateBoardView(browser)
    boards_view.wait_displayed()
    logger.info('Verifying Expected Result 5.1. User is taken to https://sprintboards.io/boards/create')
    expected_url = 'https://sprintboards.io/boards/create'
    assert browser.url == expected_url, f"Expected url is {expected_url} whereas test got {browser.url}"
    # todo: check if below condition is right
    logger.info('Verifying Expected Result 5.2. “Create a Board” title is displayed')
    expected_title = 'Create a Board'
    assert expected_title in browser.title, (f"Expected AppBrowser title is {expected_title} "
                                             f"whereas test got {browser.title}")
    # todo: check if that's ok that test has to set owner. it's not in scenario
    logger.info('Step 6. Type “My first board” in “Session Name” field')
    boards_view.fill(dict(session_name="My first board", owner="Sennder"))
    logger.info('Step 7. Click “Create Board” button')
    boards_view.create_board.click()
    try:
        boards_view.alert.wait_displayed()
        logger.info('Verifying Expected Result 7.1. User gets a confirmation pop-up saying “Created”')
        assert boards_view.alert.title == 'Created', f"Wrong alert title: {boards_view.alert.title}"
    except TimedOutError:
        pytest.fail("Alert hasn't been displayed")
    logger.info('Verifying Expected Result 7.2. URL contains “https://sprintboards.io/boards”')
    expected_url = 'https://sprintboards.io/boards'
    # todo: figure out is that correct that url includes board id and expected url doesn't fully match expected result
    assert browser.url.startswith(expected_url), f"Expected url is {expected_url} whereas test got {browser.url}"
    yield
    # todo: add delete board scenario
def test_create_green_card(board, browser):
    """
    This smoke test covers creation of Went Well card scenario.
    In addition, it goes thru and checks side things like logging in, board creation and etc
    """
    card_title = 'Goal was achieved'
    card_description = 'Sprint was well planned'
    logger.info('Step 8. Click green “+” button')
    boards_view = MainBoardView(browser)
    boards_view.wait_displayed()
    boards_view.body.went_well.add.click()
    logger.info('Verifying Expected Result 8.1. A modal with title “Add a Card” is displayed')
    add_card_view = AddCardView(browser)
    assert add_card_view.is_displayed, "Add a Card view hasn't been displayed"
    logger.info('Step 9. Type “Goal was achieved” as title')
    logger.info('Step 10. Type “Sprint was well planned” as description')
    add_card_view.body.fill(dict(title=card_title, description=card_description))
    logger.info('Step 11. Click “Add Card” button')
    add_card_view.footer.add_card.click()
    boards_view.wait_displayed()
    browser.plugin.ensure_page_safe()
    try:
        logger.info('Verifying Expected Result 11. Card is added with the title '
                    'and description specified in steps 9 and 10')
        # next() raises StopIteration when no card matches title+description
        card = next(card for card in boards_view.body.went_well.cards if card.description == card_description and
                    card.title == card_title)
    except StopIteration:
        pytest.fail("Card hasn't been created or has wrong title or description")
def test_create_delete_red_card(board, browser):
    """
    This smoke test covers create/update/delete of "Didn't go Well" card scenario.
    In addition, it goes thru and checks side things like logging in, board creation and etc
    """
    card_title = 'Goal was not achieved'
    card_description = 'No description provided'
    logger.info('Step 12. Click red “+” button')
    boards_view = MainBoardView(browser)
    boards_view.wait_displayed()
    boards_view.body.went_unwell.add.click()
    logger.info('Verifying Expected Result 12.1. A modal with title “Add a Card” is displayed')
    add_card_view = AddCardView(browser)
    assert add_card_view.is_displayed, "Add a Card view hasn't been displayed"
    logger.info('Step 13. Type “Goal was not achieved” as title')
    add_card_view.body.fill(dict(title=card_title))
    logger.info('Step 14. Click “Add Card” button')
    add_card_view.footer.add_card.click()
    browser.plugin.ensure_page_safe()
    boards_view.wait_displayed()
    try:
        logger.info('Verifying Expected Result 14.1 Card is added with the title specified in step 13')
        logger.info('Verifying Expected Result 14.2 Card’s description is set to “No description provided.”')
        wait_for(lambda: bool(boards_view.body.went_unwell.cards), timeout='5s', delay=1)  # todo: turn into method
        # `in` rather than `==`: the app appends a trailing period to the
        # default description, so the expected text is a substring only.
        created_card = next(card for card in boards_view.body.went_unwell.cards if card.title == card_title and
                            card_description in card.description)
    except (StopIteration, TimedOutError):
        pytest.fail("Card hasn't been created or has wrong title or description")
    logger.info('Step 15. Click thumbs up icon for the card in the first column')
    created_card.like()
    logger.info('Expected Result 15. “Likes” count goes from 0 to 1')
    assert created_card.wait_liked(True), "Thumbs up hasn't been updated"
    logger.info('Step 16. Click “x Delete” button from the card in the second column')
    created_card.delete()
    logger.info('Verifying Expected Result 16. Modal appears with the following text: '
                '• “Delete Card” • “Are you sure you want to continue?”')
    confirm_modal = ConfirmModal(browser)
    expected_title = "Delete Card"
    expected_description = "Are you sure you want to continue?"
    assert confirm_modal.header.is_displayed, "Confirm delete modal hasn't been displayed"
    # Compare rendered text and report the ACTUAL value on failure (the
    # original messages interpolated the widget object / the expected value).
    actual_title = confirm_modal.header.title.text
    assert actual_title == expected_title, (f"Expected modal title {expected_title}, "
                                            f"whereas got title {actual_title}")
    actual_description = confirm_modal.body.description.text
    assert actual_description == expected_description, (f"Expected modal description {expected_description} "
                                                        f"whereas got description {actual_description}")
    logger.info('Step 17. Click “Confirm” button')
    confirm_modal.accept()
    logger.info('Expected Result 17. Card with title “Goal was not achieved” is removed from the board')
    # BUGFIX: the original asserted `created_card.wait_displayed()`, i.e. that
    # the card is STILL shown; the expected result is the opposite. Wait for
    # the card to disappear instead.
    try:
        wait_for(lambda: not created_card.is_displayed, timeout='5s', delay=1)
    except TimedOutError:
        pytest.fail("The card hasn't been removed")
|
izapolsk/test-task | lib/widgets/account_dropdown.py | <filename>lib/widgets/account_dropdown.py
from widgetastic.widget import Widget
from widgetastic.utils import ParametrizedLocator
from widgetastic_bootstrap.dropdown import (Dropdown, DropdownItemDisabled, DropdownDisabled,
UnexpectedAlertPresentException)
class AccountDropdown(Dropdown):
    """Navbar account dropdown, addressed by the toggle element's id."""
    ROOT = ParametrizedLocator('//a[contains(@data-toggle, "dropdown") and @id={@id|quote}]')
    ITEMS_LOCATOR = ParametrizedLocator(
        '//div[contains(@class, "dropdown-menu") and @aria-labelledby={@id|quote}]/*[self::a or self::button]')
    # NOTE(review): mixes ParametrizedLocator fields ({@id|quote}) with a
    # positional {} placeholder that Dropdown fills later -- confirm both
    # substitutions resolve in the expected order.
    ITEM_LOCATOR = ParametrizedLocator(
        '//div[contains(@class, "dropdown-menu") and @aria-labelledby={@id|quote}]/*[self::a '
        'or self::button][normalize-space(.)={}]')
    def __init__(self, parent, id, logger=None):
        Widget.__init__(self, parent, logger=logger)
        # id attribute of the dropdown toggle element
        self.id = id
    def _verify_enabled(self):
        # raise instead of silently clicking a disabled control
        if not self.is_enabled:
            raise DropdownDisabled('Dropdown "{}" is not enabled'.format(self.id))
    def item_select(self, item, handle_alert=None):
        """Opens the dropdown and selects the desired item.

        Args:
            item: Item to be selected
            handle_alert: How to handle alerts. None - no handling, True - confirm, False - dismiss.

        Raises:
            DropdownItemDisabled
        """
        self.logger.info("Selecting %r", item)
        try:
            self.open()
            if not self.item_enabled(item):
                raise DropdownItemDisabled(
                    'Item "{}" of dropdown "{}" is disabled\n'
                    'The following items are available: {}'
                    .format(item, self.id, ";".join(self.items)))
            self.browser.click(self.item_element(item), ignore_ajax=handle_alert is not None)
            if handle_alert is not None:
                self.browser.handle_alert(cancel=not handle_alert, wait=10.0)
                self.browser.plugin.ensure_page_safe()
        finally:
            # best effort: close the menu even when selection failed
            try:
                self.close(ignore_nonpresent=True)
            except UnexpectedAlertPresentException:
                self.logger.warning("There is an unexpected alert present.")
                pass
    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self.id)
    @property
    def is_enabled(self):
        """Returns if the dropdown itself is enabled and therefore interactive."""
        return "disabled" not in self.browser.classes(self)
    def open(self):
        """Expand the menu (no-op when already open)."""
        self._verify_enabled()
        if not self.is_open:
            self.browser.click(self)
    @property
    def is_open(self):
        # Bootstrap reflects the open state in aria-expanded on the toggle
        return self.browser.get_attribute('aria-expanded', self) == 'true'
|
izapolsk/test-task | lib/views/card.py | <reponame>izapolsk/test-task
from widgetastic.widget import TextInput, View
from widgetastic_bootstrap.button import Button
from lib.widgets.bootstrap_modal import BootstrapModal
from lib.widgets.html_dropdown import HTMLDropdown
class AddCardView(BootstrapModal):
    """The "Add a Card" modal (title, description and card-type fields)."""
    @View.nested
    class body(View):  # noqa
        """ The body of the modal """
        ROOT = './/div[@class="modal-body"]'
        title = TextInput(locator='.//input[@type="text"]')
        description = TextInput(locator='.//textarea')
        card_type = HTMLDropdown(locator='.//select[@class="custom-select"]')
    @View.nested
    class footer(View):  # noqa
        """ The footer of the modal """
        ROOT = './/div[contains(@class, "modal-footer")]'
        add_card = Button('Add Card')
IntaSend/intasend-python | intasend/utils.py | from Crypto.PublicKey import RSA
import OpenSSL
from OpenSSL import crypto as OpenSSLCrypto
def generate_keys():
    """
    Returns a private and public key.

    Returns:
        tuple: PEM-encoded bytes of the generated private and public key
    """
    # 2048-bit RSA keypair
    key = RSA.generate(2048)
    private_key = key.export_key('PEM')
    public_key = key.publickey().export_key('PEM')
    return private_key, public_key
def sign_message(private_key, message):
    """
    Sign message with the private key.

    Args:
        private_key (bytes): PEM-encoded private key
        message (string): Message to sign

    Returns:
        string: hex-encoded SHA-256 signature

    NOTE(review): pyOpenSSL's crypto module is deprecated upstream;
    consider migrating to the `cryptography` package.
    """
    pkey = OpenSSLCrypto.load_privatekey(
        OpenSSLCrypto.FILETYPE_PEM, private_key, None)
    sign = OpenSSL.crypto.sign(pkey, message, "sha256")
    return sign.hex()
|
IntaSend/intasend-python | intasend/exceptions.py | class IntaSendBadRequest(Exception):
pass
class IntaSendNotAllowed(Exception):
pass
class IntaSendUnauthorized(Exception):
pass
class IntaSendServerError(Exception):
pass
|
IntaSend/intasend-python | intasend/devices.py | from .client import APIBase
class Devices(APIBase):
    """Account devices API resource (`account-devices` endpoints)."""
    def details(self, device_id):
        """Fetch a single device by its id."""
        # BUGFIX: the endpoint previously rendered literally as
        # "r'account-devices'/<id>" because a raw-string prefix was pasted
        # inside the f-string, producing a broken URL.
        return self.send_request("GET", f"account-devices/{device_id}", None)
    def retrieve(self, device_id=None):
        """List all devices, or fetch one when *device_id* is given."""
        if device_id:
            return self.details(device_id)
        # BUGFIX: same broken literal ("r'account-devices'/") as above.
        return self.send_request("GET", "account-devices/", None)
|
IntaSend/intasend-python | intasend/wallets.py | <gh_stars>1-10
from .client import APIBase
from .collections import Collect
class Wallet(APIBase):
    def __init__(self, **kwargs):
        """Wallets management service."""
        # the Collect service is reused by fund() for M-Pesa STK push top-ups
        self.collect = Collect(**kwargs)
        super().__init__(**kwargs)
    def details(self, wallet_id):
        """Fetch a single wallet by its id."""
        return self.send_request("GET", f"wallets/{wallet_id}", None)
    def create(self, currency):
        """Create a new WORKING wallet in *currency*."""
        payload = {
            "wallet_type": "WORKING",
            "currency": currency
        }
        return self.send_request("POST", "wallets/", payload)
    def retrieve(self, wallet_id=None):
        """List wallets, or fetch one when *wallet_id* is given."""
        if wallet_id:
            return self.details(wallet_id)
        return self.send_request("GET", "wallets/", None)
    def transactions(self, wallet_id):
        """List the transactions of a wallet."""
        return self.send_request("GET", f"wallets/{wallet_id}/transactions", None)
    def intra_transfer(self, origin_id, destination_id, amount, narrative):
        """Move funds between two wallets on the same account."""
        payload = {
            "wallet_id": destination_id,
            "amount": amount,
            "narrative": narrative
        }
        return self.send_request("POST", f"wallets/{origin_id}/intra_transfer/", payload=payload)
    def fund(self, wallet_id, phone_number, email, amount, narrative, currency="KES", api_ref="API Request", name=None):
        """Top up a wallet via M-Pesa STK push.

        NOTE(review): *wallet_id* is accepted but never forwarded to the
        collect call -- confirm whether the API infers the target wallet.
        """
        return self.collect.mpesa_stk_push(phone_number=phone_number, email=email, amount=amount, narrative=narrative, currency=currency, api_ref=api_ref, name=name)
|
IntaSend/intasend-python | intasend/client.py | <reponame>IntaSend/intasend-python<filename>intasend/client.py
import requests
from .exceptions import (IntaSendBadRequest, IntaSendNotAllowed,
IntaSendServerError, IntaSendUnauthorized)
def get_service_url(service_endpoint, test=False):
    """Build the absolute IntaSend API URL for *service_endpoint*.

    When *test* is truthy the sandbox host is used, otherwise production.
    """
    host = "https://sandbox.intasend.com" if test else "https://payment.intasend.com"
    return f"{host}/api/v1/{service_endpoint}"
class APIBase(object):
    """Shared HTTP plumbing for all IntaSend API resources."""
    def __init__(self, **kwargs):
        """Store credentials; a bearer *token* is mandatory."""
        self.token = kwargs.get("token")
        self.publishable_key = kwargs.get("publishable_key")
        self.private_key = kwargs.get("private_key")
        # route all requests to the sandbox host when True
        self.test = kwargs.get("test", False)
        if not self.token:
            raise Exception("Authentication token is required")
        super().__init__()
    def send_request(self, request_type, service_endpoint, payload, noauth=False):
        """Issue an HTTP request and map error status codes to exceptions.

        NOTE(review): *noauth* is accepted but never used -- the auth header
        is always sent. Also resp.json() will raise on empty/non-JSON bodies
        (e.g. 204 responses) -- confirm the API never returns those.
        """
        url = get_service_url(service_endpoint, self.test)
        headers = self.get_headers()
        resp = requests.request(
            request_type, url, json=payload, headers=headers)
        # map the documented error codes onto package exceptions
        if resp.status_code == 400:
            raise IntaSendBadRequest(resp.text)
        elif resp.status_code == 403:
            raise IntaSendNotAllowed(resp.text)
        elif resp.status_code == 500:
            raise IntaSendServerError(resp.text)
        elif resp.status_code == 401:
            raise IntaSendUnauthorized(resp.text)
        return resp.json()
    def get_headers(self):
        """Authorization header attached to every request."""
        return {
            "Authorization": f"Bearer {self.token}"
        }
|
IntaSend/intasend-python | examples/services.py | import secrets
import os
from intasend import APIService
# from intasend.utils import generate_keys
TEST_PRIVATE_KEY = os.environ.get("TEST_PRIVATE_KEY")
TEST_PHONE_NUMBER = os.environ.get("TEST_PHONE_NUMBER")
TEST_API_TOKEN = os.environ.get("TEST_API_TOKEN")
TEST_PUBLISHABLE_KEY = os.environ.get("TEST_PUBLISHABLE_KEY")
# Shared client; test=True routes every call to the sandbox API host.
service = APIService(token=TEST_API_TOKEN,
                     publishable_key=TEST_PUBLISHABLE_KEY, private_key=TEST_PRIVATE_KEY, test=True)
if __name__ == "__main__":
    # Smoke-walk over the SDK surface: payment links, collections, wallets,
    # chargebacks and transfers. Each call prints its raw JSON response.
    print("Running service")
    title = f"Link - {secrets.randbelow(20000)}"
    response = service.payment_links.create(
        title=title, currency="KES", amount=10)
    print(response)
    response = service.collect.mpesa_stk_push(phone_number=TEST_PHONE_NUMBER,
                                              email="<EMAIL>", amount=10, narrative="Fees")
    print(response)
    response = service.wallets.retrieve()
    print(response)
    response = service.wallets.details("ZQMMOQO")
    print(response)
    response = service.wallets.transactions("ZQMMOQO")
    print(response)
    response = service.wallets.create("EUR")
    print(response)
    response = service.wallets.fund(
        wallet_id="ZQMMOQO", phone_number=TEST_PHONE_NUMBER, email="<EMAIL>", amount=10, narrative="Fees", name="FELIX C")
    print(response)
    response = service.wallets.intra_transfer(
        "ZQMMOQO", "XZY43Q8", 1, "Charge capture")
    print(response)
    response = service.chargebacks.retrieve("EYVBZR2")
    print(response)
    transactions = [{'name': 'test-name', 'account': TEST_PHONE_NUMBER, 'amount': 10},
                    {'name': 'test-name', 'account': TEST_PHONE_NUMBER, 'amount': 10000}]
    response = service.transfer.mpesa(
        device_id="KZQMORO", currency='KES', transactions=transactions)
    print(response)
    status = service.transfer.status(response.get("tracking_id"))
    print(f"Status: {status}")
    response = service.transfer.approve(response)
    print(f"Approve response: {response}")
    # private_key, public_key = generate_keys()
    # print(private_key)
    # print("x"*10)
    # print(public_key)
    response = service.collect.checkout(phone_number=TEST_PHONE_NUMBER,
                                        email="<EMAIL>", amount=10, currency="KES", comment="Fees")
    print(response)
    response = service.collect.status(invoice_id="NR5XKGY")
    print(response)
|
IntaSend/intasend-python | setup.py | from setuptools import setup
# Packaging metadata for the intasend-python distribution published to PyPI.
setup(
    name='intasend-python',
    packages=['intasend'],
    version='1.0.1',
    license='MIT',
    description='Official Python SDK for IntaSend Payments Gateway API',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/IntaSend/intasend-python',
    download_url='https://github.com/IntaSend/intasend-python/archive/v_1.0.1.tar.gz',
    keywords=['payments', 'mpesa', 'card payments', 'visa',
              'mastercard', 'payments kenya', 'intasend'],
    install_requires=[
        'requests',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ]
)
|
IntaSend/intasend-python | intasend/payment_links.py | <reponame>IntaSend/intasend-python
from .client import APIBase
class PaymentLinks(APIBase):
    """Payment links API resource (`paymentlinks` endpoints)."""
    def details(self, link_id):
        """Fetch a single payment link by its id."""
        return self.send_request("GET", f"paymentlinks/{link_id}", None)
    def create(self, title, currency, amount=0, mobile_tarrif="BUSINESS-PAYS", card_tarrif="BUSINESS-PAYS", is_active=True, **kwargs):
        """Create a payment link; extra keyword args go into the payload."""
        payload = kwargs
        payload["title"] = title
        payload["currency"] = currency
        payload["amount"] = amount
        payload["mobile_tarrif"] = mobile_tarrif
        payload["card_tarrif"] = card_tarrif
        payload["is_active"] = is_active
        return self.send_request("POST", "paymentlinks/", payload)
    def retrieve(self, link_id=None):
        """List payment links, or fetch one when *link_id* is given."""
        if link_id:
            return self.details(link_id)
        return self.send_request("GET", "paymentlinks/", None)
    def deactivate(self):
        # NOTE(review): stub -- issues the same GET list request as
        # retrieve(); no deactivation actually happens.
        return self.send_request("GET", "paymentlinks/", None)
    def update(self):
        # NOTE(review): stub -- issues the same GET list request as
        # retrieve(); no update actually happens.
        return self.send_request("GET", "paymentlinks/", None)
|
IntaSend/intasend-python | intasend/customers.py | <filename>intasend/customers.py
from .client import APIBase
class Customers(APIBase):
    """Customers API resource (`customers` endpoints)."""
    def details(self, customer_id):
        """Fetch a single customer by its id."""
        return self.send_request("GET", f"customers/{customer_id}", None)
    def create(self, email, **kwargs):
        """Create a customer; extra keyword args go into the payload."""
        payload = dict(kwargs, email=email)
        return self.send_request("POST", "customers/", payload)
    def retrieve(self, customer_id=None):
        """List customers, or fetch one when *customer_id* is given."""
        if not customer_id:
            return self.send_request("GET", "customers/", None)
        return self.details(customer_id)
    def transactions(self, customer_id):
        """List the transactions of a customer."""
        return self.send_request("GET", f"customers/{customer_id}/transactions", None)
|
IntaSend/intasend-python | intasend/transfers.py | <reponame>IntaSend/intasend-python<filename>intasend/transfers.py
from .client import APIBase
from .utils import sign_message
class Transfer(APIBase):
    """Money transfer (send-money) API resource."""
    def send_money(self, device_id, provider, currency, transactions, callback_url=None):
        """Initiate a send-money batch through *provider*."""
        payload = dict(
            device_id=device_id,
            provider=provider,
            currency=currency,
            transactions=transactions,
            callback_url=callback_url,
        )
        return self.send_request("POST", "send-money/initiate/", payload)
    def approve(self, payload):
        """Sign the returned nonce with the account private key and approve."""
        payload["nonce"] = sign_message(self.private_key.encode("utf-8"), payload["nonce"])
        return self.send_request("POST", "send-money/approve/", payload)
    def status(self, tracking_id):
        """Check the status of a previously initiated batch."""
        return self.send_request("POST", "send-money/status/", {"tracking_id": tracking_id})
    def mpesa(self, device_id, currency, transactions, callback_url=None):
        """Send via M-Pesa B2C (to phone numbers)."""
        return self.send_money(device_id, "MPESA-B2C", currency, transactions, callback_url)
    def mpesa_b2b(self, device_id, currency, transactions, callback_url=None):
        """Send via M-Pesa B2B (to tills/paybills)."""
        return self.send_money(device_id, "MPESA-B2B", currency, transactions, callback_url)
    def intasend(self, device_id, currency, transactions, callback_url=None):
        """Send to other IntaSend accounts."""
        return self.send_money(device_id, "INTASEND", currency, transactions, callback_url)
    def bank(self, device_id, currency, transactions, callback_url=None):
        """Send to bank accounts via PesaLink."""
        return self.send_money(device_id, "PESALINK", currency, transactions, callback_url)
|
IntaSend/intasend-python | intasend/collections.py | <gh_stars>1-10
from .client import APIBase
class Collect(APIBase):
    """Client for the IntaSend payment-collection endpoints."""

    def checkout(self, email, amount, currency, **kwargs):
        """Generate a hosted payment checkout URL.

        Args:
            email (str): Customer email.
            amount (float): Total amount billed.
            currency (str): Currency code (KES, USD, EUR, GBP).
            **kwargs: Optional fields: method, api_ref, callback_url, comment,
                first_name, last_name, phone_number, mobile_tarrif, card_tarrif.

        Returns:
            dict: JSON dictionary with a checkout url field.
        """
        payload = {
            "public_key": self.publishable_key,
            "currency": currency,
            "email": email,
            "amount": amount,
            "method": kwargs.get("method"),
            "api_ref": kwargs.get("api_ref", "API Request"),
            "callback_url": kwargs.get("callback_url"),
            "comment": kwargs.get("comment"),
            "first_name": kwargs.get("first_name"),
            "last_name": kwargs.get("last_name"),
            "phone_number": kwargs.get("phone_number"),
            # "tarrif" spelling matches the remote API field names; do not "fix" it.
            "mobile_tarrif": kwargs.get("mobile_tarrif", "BUSINESS-PAYS"),
            "card_tarrif": kwargs.get("card_tarrif", "BUSINESS-PAYS"),
            "version": "3.0.0",
        }
        # BUG FIX: removed a leftover debug print() that dumped the full
        # request payload (including the publishable key) to stdout.
        return self.send_request("POST", "checkout/", payload)

    def status(self, invoice_id, checkout_id=None, signature=None):
        """Check status of a transaction/invoice.

        Args:
            invoice_id (str): Invoice or tracking ID.
            checkout_id (str, optional): Checkout id for payments requests
                through the checkout API. Defaults to None.
            signature (str, optional): JWT signature for payments requests
                through the checkout API. Defaults to None.

        Returns:
            dict: JSON with transaction details.
        """
        payload = {
            "invoice_id": invoice_id,
            "public_key": self.publishable_key,
        }
        if checkout_id and signature:
            # Checkout-originated payments authenticate with the signed
            # checkout session rather than the publishable key.
            payload = {
                "invoice_id": invoice_id,
                "signature": signature,
                "checkout_id": checkout_id,
            }
        return self.send_request("POST", "payment/status/", payload)

    def mpesa_stk_push(self, phone_number, amount, narrative, currency="KES", api_ref="API Request", name=None, email=None):
        """Trigger an M-Pesa STK push to collect ``amount`` from ``phone_number``.

        NOTE(review): ``narrative`` is accepted but never sent to the API;
        it is kept in the signature for backward compatibility — confirm
        whether the endpoint expects it.
        """
        payload = {
            "public_key": self.publishable_key,
            "currency": currency,
            "method": "M-PESA",
            "amount": amount,
            "phone_number": phone_number,
            "api_ref": api_ref,
            "name": name,
            "email": email,
        }
        return self.send_request("POST", "payment/collection/", payload)

    def get_quote(self, amount, method, currency="KES", tarrif="BUSINESS-PAYS"):
        """Estimate charges for collecting ``amount`` via ``method``."""
        payload = {
            "public_key": self.publishable_key,
            "currency": currency,
            # BUG FIX: ``amount`` was accepted but never included in the
            # request, so the estimate endpoint had no amount to quote.
            "amount": amount,
            "method": method,
            "tarrif": tarrif,
        }
        return self.send_request("POST", "payment/get_amount_estimate/", payload)
|
IntaSend/intasend-python | intasend/chargebacks.py | <reponame>IntaSend/intasend-python<filename>intasend/chargebacks.py<gh_stars>1-10
from .client import APIBase
class Chagebacks(APIBase):
    """Client for the IntaSend chargebacks API.

    NOTE(review): the class name misspells "Chargebacks"; it is kept
    unchanged because it is exported as part of the public interface.
    """

    def create(self, invoice, amount, reason, reason_details=None):
        """Open a chargeback/dispute against an invoice."""
        body = {
            "invoice": invoice,
            "amount": amount,
            "reason": reason,
            "reason_details": reason_details,
        }
        return self.send_request("POST", "chargebacks/", body)

    def retrieve(self, chargeback_id=None):
        """Return one chargeback when an ID is given, otherwise list all."""
        if chargeback_id:
            return self.details(chargeback_id)
        return self.send_request("GET", "chargebacks/", {})

    def details(self, chargeback_id=None):
        """Fetch a single chargeback by its ID."""
        return self.send_request("GET", f"chargebacks/{chargeback_id}/", {})
|
IntaSend/intasend-python | intasend/__init__.py | """IntaSend Payments API for Python 3."""
from .collections import Collect
from .transfers import Transfer
from .wallets import Wallet
from .chargebacks import Chagebacks
from .payment_links import PaymentLinks
from .devices import Devices
from .customers import Customers
class APIService:
    """Facade bundling every IntaSend API resource client.

    All keyword arguments (credentials, test-mode flag, ...) are forwarded
    unchanged to each resource client's constructor.
    """

    def __init__(self, **kwargs):
        """API Services Initialization."""
        # One client per resource, all sharing the same configuration.
        self.collect = Collect(**kwargs)
        self.transfer = Transfer(**kwargs)
        self.wallets = Wallet(**kwargs)
        self.chargebacks = Chagebacks(**kwargs)
        self.customers = Customers(**kwargs)
        self.devices = Devices(**kwargs)
        self.payment_links = PaymentLinks(**kwargs)
|
sgalpha01/biopython | Bio/Align/mauve.py | <reponame>sgalpha01/biopython<filename>Bio/Align/mauve.py
# Copyright 2015-2015 by <NAME>. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.Align support for "xmfa" output from Mauve/ProgressiveMauve.
You are expected to use this module via the Bio.Align functions.
"""
from Bio.Align import interfaces, Alignment
from Bio.Seq import Seq, reverse_complement
from Bio.SeqRecord import SeqRecord
from Bio import BiopythonExperimentalWarning
import warnings
# Emit an experimental-API warning as soon as this module is imported.
warnings.warn(
    "Bio.Align.mauve is an experimental module which may undergo "
    "significant changes prior to its future official release.",
    BiopythonExperimentalWarning,
)
class AlignmentWriter(interfaces.AlignmentWriter):
    """Mauve/XMFA alignment writer."""

    def write_header(self, alignments):
        """Write the file header to the output file."""
        stream = self.stream
        metadata = alignments.metadata
        format_version = metadata.get("FormatVersion", "Mauve1")
        line = f"#FormatVersion {format_version}\n"
        stream.write(line)
        alignment = alignments[0]
        # first alignment always seems to contain all sequences
        identifiers = [sequence.id for sequence in alignment.sequences]
        filename = metadata.get("File")
        if filename is None:
            # sequences came from separate files
            for index, filename in enumerate(identifiers):
                number = index + 1  # Mauve header lines are 1-based
                line = f"#Sequence{number}File\t(unknown)\n"
                stream.write(line)
                line = f"#Sequence{number}Format\tFastA\n"
                stream.write(line)
            # remember the per-sequence ids; format_alignment() looks them up
            self._filenames = identifiers
        else:
            # sequences came from one combined file
            for number, identifier in enumerate(identifiers):
                # ids are expected to be the stringified 0-based entry numbers
                assert number == int(identifier)
                number += 1
                line = f"#Sequence{number}File\t(unknown)\n"
                stream.write(line)
                line = f"#Sequence{number}Entry\t{number}\n"
                stream.write(line)
                line = f"#Sequence{number}Format\tFastA\n"
                stream.write(line)
        backbone_file = metadata.get("BackboneFile", None)
        if backbone_file is not None:
            line = f"#BackboneFile\t{backbone_file}\n"
            stream.write(line)

    def write_file(self, alignments):
        """Write a file with the alignments, and return the number of alignments.

        alignments - A Bio.Align.mauve.AlignmentIterator object.
        """

        # Materialize the iterator into a list (write_header needs random
        # access to the first alignment) while keeping a metadata attribute.
        class ListWithAttributes(list):
            pass

        try:
            metadata = alignments.metadata
        except AttributeError:
            metadata = {}
        alignments = ListWithAttributes(alignments)
        alignments.metadata = metadata
        self._filename = metadata.get("File")
        count = interfaces.AlignmentWriter.write_file(self, alignments)
        return count

    def format_alignment(self, alignment):
        """Return a string with a single alignment in the Mauve format."""
        n, m = alignment.shape
        if n == 0:
            raise ValueError("Must have at least one sequence")
        if m == 0:
            raise ValueError("Non-empty sequences are required")
        filename = self._filename
        lines = []
        for i in range(n):
            identifier = alignment.sequences[i].id
            start = alignment.coordinates[i, 0]
            end = alignment.coordinates[i, -1]
            # Decreasing coordinates mean the aligned segment is on the
            # reverse strand; normalize so start <= end for output.
            if start <= end:
                strand = "+"
            else:
                strand = "-"
                start, end = end, start
            if start == end:
                # zero-length segment: an unaligned sequence, written as 0-0
                assert start == 0
            else:
                start += 1  # switch to 1-based counting
            sequence = alignment[i]
            if filename is None:
                number = (
                    self._filenames.index(identifier) + 1
                )  # Switch to 1-based counting
                line = f"> {number}:{start}-{end} {strand} {identifier}\n"
            else:
                number = int(identifier) + 1  # Switch to 1-based counting
                line = f"> {number}:{start}-{end} {strand} (unknown)\n"
            lines.append(line)
            line = f"{sequence}\n"
            lines.append(line)
        # "=" terminates each alignment block in XMFA
        lines.append("=\n")
        return "".join(lines)
class AlignmentIterator(interfaces.AlignmentIterator):
    """Mauve xmfa alignment iterator."""

    def __init__(self, source):
        """Create an AlignmentIterator object.

        Arguments:
         - source - input data or file name
        """
        super().__init__(source, mode="t", fmt="Mauve")
        stream = self.stream
        metadata = {}
        prefix = "Sequence"
        suffixes = ("File", "Entry", "Format")
        # id_info collects, per suffix, one value per sequence in file order
        id_info = {}
        for suffix in suffixes:
            id_info[suffix] = []
        for line in stream:
            if not line.startswith("#"):
                # first non-header line: stash it for parse() and stop
                self._line = line.strip()
                break
            key, value = line[1:].split()
            if key.startswith(prefix):
                # header keys look like "Sequence<N><suffix>"
                for suffix in suffixes:
                    if key.endswith(suffix):
                        break
                else:
                    raise ValueError("Unexpected keyword '%s'" % key)
                if suffix == "Entry":
                    value = int(value) - 1  # Switch to 0-based counting
                seq_num = int(key[len(prefix) : -len(suffix)])
                id_info[suffix].append(value)
                assert seq_num == len(id_info[suffix])  # Mauve uses 1-based counting
            else:
                metadata[key] = value.strip()
        else:
            # stream exhausted without a non-header line
            if not metadata:
                raise ValueError("Empty file.") from None
        if len(set(id_info["File"])) == 1:
            # A single file containing all sequences was provided as input;
            # store the file name once, and use the entry number as ID
            metadata["File"] = id_info["File"][0]
            self._identifiers = [str(entry) for entry in id_info["Entry"]]
        else:
            assert len(set(id_info["File"])) == len(id_info["File"])
            # Separate files for each of the sequences were provided as input;
            # use the sequence file as ID
            self._identifiers = id_info["File"]
        self.metadata = metadata

    def _parse_description(self, line):
        # Parse a "> N:start-end strand comments" sequence header line and
        # return (identifier, start, end, strand, comments) with 0-based start.
        assert line.startswith(">")
        locus, strand, comments = line[1:].split(None, 2)
        seq_num, start_end = locus.split(":")
        seq_num = int(seq_num) - 1  # python counting
        identifier = self._identifiers[seq_num]
        assert strand in "+-"
        start, end = start_end.split("-")
        start = int(start)
        end = int(end)
        if start == 0:
            assert end == 0  # unaligned sequence
        else:
            start -= 1  # python counting
        return (identifier, start, end, strand, comments)

    def parse(self, stream):
        """Parse the next alignment from the stream."""
        if stream is None:
            return
        descriptions = []
        seqs = []
        # __init__ stored the first sequence header line of this alignment
        line = self._line
        del self._line
        description = self._parse_description(line)
        identifier, start, end, strand, comments = description
        descriptions.append(description)
        seqs.append("")
        for line in stream:
            line = line.strip()
            if line.startswith("="):
                # There may be more data, but we've reached the end of this
                # alignment
                coordinates = Alignment.infer_coordinates(seqs)
                records = []
                for index, (description, seq) in enumerate(zip(descriptions, seqs)):
                    identifier, start, end, strand, comments = description
                    length = end - start
                    seq = seq.replace("-", "")
                    assert len(seq) == end - start
                    if strand == "+":
                        pass
                    elif strand == "-":
                        # store the forward-strand sequence; flip the
                        # gap-inferred coordinates to match
                        seq = reverse_complement(seq, inplace=False)
                        coordinates[index, :] = len(seq) - coordinates[index, :]
                    else:
                        raise ValueError("Unexpected strand '%s'" % strand)
                    coordinates[index] += start
                    if start == 0:
                        seq = Seq(seq)
                    else:
                        # partially-defined sequence starting at offset `start`
                        seq = Seq({start: seq}, length=end)
                    record = SeqRecord(seq, id=identifier, description=comments)
                    records.append(record)
                yield Alignment(records, coordinates)
                descriptions = []
                seqs = []
            elif line.startswith(">"):
                description = self._parse_description(line)
                identifier, start, end, strand, comments = description
                descriptions.append(description)
                seqs.append("")
            else:
                # continuation of the current sequence's aligned text
                seqs[-1] += line
|
sgalpha01/biopython | Tests/test_Align_bed.py | <reponame>sgalpha01/biopython<filename>Tests/test_Align_bed.py
# Copyright 2022 by <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Align.bed module."""
import unittest
import os
import warnings
from io import StringIO
from Bio.Align import Alignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio import BiopythonExperimentalWarning
# Bio.Align.bed is still experimental; silence the warning it raises on import.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", BiopythonExperimentalWarning)
    from Bio.Align import bed

# numpy is a hard requirement here: alignment coordinates are numpy arrays.
try:
    import numpy
except ImportError:
    from Bio import MissingPythonDependencyError

    raise MissingPythonDependencyError(
        "Install numpy if you want to use Bio.Align.bed."
    ) from None
class TestAlign_dna_rna(unittest.TestCase):
    """Round-trip tests for dna_rna.bed (four RNA-to-genome alignments)."""

    # The BED file dna_rna.bed was generated using this command:
    # pslToBed dna_rna.psl dna_rna.bed
    def setUp(self):
        # Reassemble the hg38 chr3 target from partial FASTA records whose ids
        # encode "chr3:start-end"; Seq accepts a {offset: fragment} dict for
        # partially-defined sequences.
        data = {}
        records = SeqIO.parse("Blat/dna.fa", "fasta")
        for record in records:
            name, start_end = record.id.split(":")
            assert name == "chr3"
            start, end = start_end.split("-")
            start = int(start)
            end = int(end)
            sequence = str(record.seq)
            assert len(sequence) == end - start
            data[start] = sequence
        self.dna = Seq(data, length=198295559)  # hg38 chr3
        records = SeqIO.parse("Blat/rna.fa", "fasta")
        self.rna = {record.id: record.seq for record in records}

    def test_reading(self):
        """Test parsing dna_rna.bed."""
        path = "Blat/dna_rna.bed"
        alignments = bed.AlignmentIterator(path)
        # Alignment 1: NR_111921.1, plus strand.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertEqual(alignment.shape, (2, 5407))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_111921.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[48663767, 48663813, 48665640,
                              48665722, 48669098, 48669174],
                             [0, 46, 46,
                              128, 128, 204]])
                # fmt: on
            )
        )
        # Attach real sequences so the substitution matrix can be computed.
        alignment.target.seq = self.dna
        alignment.query.seq = self.rna[alignment.query.id]
        self.assertTrue(
            numpy.array_equal(
                alignment.substitutions,
                # fmt: off
                # flake8: noqa
                numpy.array([[53., 0., 0., 0., 0., 0., 0., 0.],
                             [0., 35., 0., 0., 0., 0., 0., 0.],
                             [0., 0., 50., 0., 0., 0., 0., 0.],
                             [0., 0., 0., 27., 0., 0., 0., 0.],
                             [9., 0., 0., 0., 0., 0., 0., 0.],
                             [0., 7., 0., 0., 0., 0., 0., 0.],
                             [0., 0., 16., 0., 0., 0., 0., 0.],
                             [0., 0., 0., 7., 0., 0., 0., 0.],
                             ])
                # fmt: on
            )
        )
        self.assertEqual(alignment.substitutions.alphabet, "ACGTacgt")
        # Alignment 2: NR_046654.1, minus strand (query coordinates decrease).
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertEqual(alignment.shape, (2, 1711))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_046654.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[42530895, 42530958, 42532020,
                              42532095, 42532563, 42532606],
                             [181, 118, 118,
                              43, 43, 0]])
                # fmt: on
            )
        )
        alignment.target.seq = self.dna
        alignment.query.seq = self.rna[alignment.query.id]
        self.assertTrue(
            numpy.array_equal(
                alignment.substitutions,
                # fmt: off
                # flake8: noqa
                numpy.array([[36., 0., 0., 0., 0., 0., 0., 0.],
                             [0., 40., 0., 0., 0., 0., 0., 0.],
                             [0., 0., 57., 0., 0., 0., 0., 0.],
                             [0., 0., 0., 42., 0., 0., 0., 0.],
                             [2., 0., 0., 0., 0., 0., 0., 0.],
                             [0., 1., 0., 0., 0., 0., 0., 0.],
                             [0., 0., 3., 0., 0., 0., 0., 0.],
                             [0., 0., 0., 0., 0., 0., 0., 0.],
                             ])
                # fmt: on
            )
        )
        self.assertEqual(alignment.substitutions.alphabet, "ACGTacgt")
        # Alignment 3: NR_111921.1_modified, plus strand.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 992)
        self.assertEqual(alignment.shape, (2, 5407))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_111921.1_modified")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[48663767, 48663795, 48663796, 48663813, 48665640,
                              48665716, 48665722, 48669098, 48669174],
                             [0, 28, 28, 45, 45,
                              121, 127, 127, 203]])
                # fmt: on
            )
        )
        # The modified RNAs have gaps in their sequence. As this information is
        # not stored in a BED file, we cannot calculate the substitution matrix.
        # Alignment 4: NR_046654.1_modified, minus strand.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 990)
        self.assertEqual(alignment.shape, (2, 1711))
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr3")
        self.assertEqual(alignment.query.id, "NR_046654.1_modified")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[42530895, 42530922, 42530958, 42532020, 42532037,
                              42532039, 42532095, 42532563, 42532606],
                             [179, 152, 116, 116, 99,
                              99, 43, 43, 0]])
                # fmt: on
            )
        )
        self.assertRaises(StopIteration, next, alignments)

    def test_writing(self):
        """Test writing the alignments in dna_rna.bed."""
        path = "Blat/dna_rna.bed"
        with open(path) as stream:
            original_data = stream.read()
        alignments = bed.AlignmentIterator(path)
        stream = StringIO()
        writer = bed.AlignmentWriter(stream)
        # Round-trip: the writer's output must match the input file exactly.
        n = writer.write_file(alignments, mincount=4, maxcount=4)
        self.assertEqual(n, 4)
        stream.seek(0)
        written_data = stream.read()
        stream.close()
        self.assertEqual(original_data, written_data)
class TestAlign_dna(unittest.TestCase):
def test_reading_psl_34_001(self):
    """Test parsing psl_34_001.bed.

    The 22 expected alignments are table-driven: each row gives the BED
    score, the alignment length (columns of ``shape``), the target and
    query ids, and the coordinate matrix. The query strand is implied by
    the query coordinates (decreasing values mean the minus strand).
    """
    # fmt: off
    # flake8: noqa
    expected = [
        (1000, 16, "chr4", "hg18_dna", [[61646095, 61646111], [0, 16]]),
        (1000, 33, "chr1", "hg18_dna", [[10271783, 10271816], [0, 33]]),
        (1000, 17, "chr2", "hg18_dna", [[53575980, 53575997], [17, 0]]),
        (854, 41, "chr9", "hg19_dna", [[85737865, 85737906], [0, 41]]),
        (1000, 41, "chr8", "hg19_dna", [[95160479, 95160520], [0, 41]]),
        (834, 36, "chr22", "hg19_dna", [[42144400, 42144436], [0, 36]]),
        (682, 44, "chr2", "hg19_dna",
         [[183925984, 183925990, 183926028], [0, 6, 44]]),
        (890, 170, "chr19", "hg19_dna",
         [[35483340, 35483365, 35483499, 35483510], [0, 25, 25, 36]]),
        (1000, 39, "chr18", "hg19_dna", [[23891310, 23891349], [0, 39]]),
        (930, 28, "chr18", "hg19_dna", [[43252217, 43252245], [0, 28]]),
        (912, 51, "chr13", "hg19_dna",
         [[52759147, 52759154, 52759160, 52759198], [0, 7, 7, 45]]),
        (1000, 50, "chr1", "hg19_dna", [[1207056, 1207106], [0, 50]]),
        (824, 34, "chr1", "hg19_dna", [[61700837, 61700871], [0, 34]]),
        (572, 34, "chr4", "hg19_dna",
         [[37558157, 37558167, 37558173, 37558191], [28, 18, 18, 0]]),
        (892, 37, "chr22", "hg19_dna", [[48997405, 48997442], [37, 0]]),
        (946, 36, "chr2", "hg19_dna", [[120641740, 120641776], [36, 0]]),
        (1000, 39, "chr19", "hg19_dna", [[54017130, 54017169], [39, 0]]),
        (848, 39, "chr19", "hg19_dna", [[553742, 553781], [39, 0]]),
        (834, 36, "chr10", "hg19_dna", [[99388555, 99388591], [36, 0]]),
        (920, 25, "chr10", "hg19_dna", [[112178171, 112178196], [25, 0]]),
        (946, 36, "chr1", "hg19_dna", [[39368490, 39368526], [36, 0]]),
        (942, 34, "chr1", "hg19_dna", [[220325687, 220325721], [34, 0]]),
    ]
    # fmt: on
    path = "Blat/psl_34_001.bed"
    alignments = bed.AlignmentIterator(path)
    for i, (score, length, target_id, query_id, coordinates) in enumerate(expected):
        with self.subTest(index=i, target=target_id):
            alignment = next(alignments)
            self.assertEqual(alignment.score, score)
            self.assertEqual(alignment.shape, (2, length))
            # Target coordinates always run forward in these alignments.
            self.assertLess(
                alignment.coordinates[0, 0], alignment.coordinates[0, -1]
            )
            # Query coordinates run backwards for minus-strand alignments.
            if coordinates[1][0] < coordinates[1][-1]:
                self.assertLess(
                    alignment.coordinates[1, 0], alignment.coordinates[1, -1]
                )
            else:
                self.assertGreater(
                    alignment.coordinates[1, 0], alignment.coordinates[1, -1]
                )
            self.assertEqual(len(alignment), 2)
            self.assertIs(alignment.sequences[0], alignment.target)
            self.assertIs(alignment.sequences[1], alignment.query)
            self.assertEqual(alignment.target.id, target_id)
            self.assertEqual(alignment.query.id, query_id)
            self.assertTrue(
                numpy.array_equal(
                    alignment.coordinates, numpy.array(coordinates)
                )
            )
    self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_001(self):
"""Test writing the alignments in psl_34_001.bed."""
path = "Blat/psl_34_001.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=22, maxcount=22)
self.assertEqual(n, 22)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
def test_reading_psl_34_003(self):
"""Test parsing psl_34_003.bed."""
path = "Blat/psl_34_003.bed"
alignments = bed.AlignmentIterator(path)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 16))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61646095, 61646111],
[ 0, 16]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 33))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[10271783, 10271816],
[ 0, 33]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 17))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[53575980, 53575997],
[ 17, 0]]),
# fmt: on
)
)
self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_003(self):
"""Test writing the alignments in psl_34_003.bed."""
path = "Blat/psl_34_003.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=3, maxcount=3)
self.assertEqual(n, 3)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
def test_reading_psl_34_004(self):
"""Test parsing psl_34_004.bed."""
path = "Blat/psl_34_004.bed"
alignments = bed.AlignmentIterator(path)
alignment = next(alignments)
self.assertEqual(alignment.score, 854)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr9")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[85737865, 85737906],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr8")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[95160479, 95160520],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[42144400, 42144436],
[ 0, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 682)
self.assertEqual(alignment.shape, (2, 44))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[183925984, 183925990, 183926028],
[ 0, 6, 44]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 890)
self.assertEqual(alignment.shape, (2, 170))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[35483340, 35483365, 35483499, 35483510],
[ 0, 25, 25, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[23891310, 23891349],
[ 0, 39]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 930)
self.assertEqual(alignment.shape, (2, 28))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[43252217, 43252245],
[ 0, 28]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 912)
self.assertEqual(alignment.shape, (2, 51))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr13")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[52759147, 52759154, 52759160, 52759198],
[ 0, 7, 7, 45]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 50))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[1207056, 1207106],
[ 0, 50]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 824)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61700837, 61700871],
[ 0, 34]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 572)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[37558157, 37558167, 37558173, 37558191],
[ 28, 18, 18, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 892)
self.assertEqual(alignment.shape, (2, 37))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[48997405, 48997442],
[ 37, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[120641740, 120641776],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[54017130, 54017169],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 848)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[553742, 553781],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[99388555, 99388591],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 920)
self.assertEqual(alignment.shape, (2, 25))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[112178171, 112178196],
[ 25, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[39368490, 39368526],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 942)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[220325687, 220325721],
[ 34, 0]]),
# fmt: on
)
)
self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_004(self):
"""Test writing the alignments in psl_34_004.bed."""
path = "Blat/psl_34_004.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=19, maxcount=19)
self.assertEqual(n, 19)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
def test_reading_psl_34_005(self):
"""Test parsing psl_34_005.bed."""
path = "Blat/psl_34_005.bed"
alignments = bed.AlignmentIterator(path)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 16))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61646095, 61646111],
[ 0, 16]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 33))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[10271783, 10271816],
[ 0, 33]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 17))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg18_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[53575980, 53575997],
[ 17, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 854)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr9")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[85737865, 85737906],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 41))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr8")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[95160479, 95160520],
[ 0, 41]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[42144400, 42144436],
[ 0, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 682)
self.assertEqual(alignment.shape, (2, 44))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[183925984, 183925990, 183926028],
[ 0, 6, 44]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 890)
self.assertEqual(alignment.shape, (2, 170))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[35483340, 35483365, 35483499, 35483510],
[ 0, 25, 25, 36]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[23891310, 23891349],
[ 0, 39]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 930)
self.assertEqual(alignment.shape, (2, 28))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr18")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[43252217, 43252245],
[ 0, 28]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 912)
self.assertEqual(alignment.shape, (2, 51))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr13")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[52759147, 52759154, 52759160, 52759198],
[ 0, 7, 7, 45]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 50))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[1207056, 1207106],
[ 0, 50]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 824)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[61700837, 61700871],
[ 0, 34]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 572)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr4")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[37558157, 37558167, 37558173, 37558191],
[ 28, 18, 18, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 892)
self.assertEqual(alignment.shape, (2, 37))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr22")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[48997405, 48997442],
[ 37, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr2")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[120641740, 120641776],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 1000)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[54017130, 54017169],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 848)
self.assertEqual(alignment.shape, (2, 39))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr19")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[553742, 553781],
[ 39, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 834)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[99388555, 99388591],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 920)
self.assertEqual(alignment.shape, (2, 25))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr10")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[112178171, 112178196],
[ 25, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 946)
self.assertEqual(alignment.shape, (2, 36))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
numpy.array([[39368490, 39368526],
[ 36, 0]]),
# fmt: on
)
)
alignment = next(alignments)
self.assertEqual(alignment.score, 942)
self.assertEqual(alignment.shape, (2, 34))
self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
self.assertEqual(len(alignment), 2)
self.assertIs(alignment.sequences[0], alignment.target)
self.assertIs(alignment.sequences[1], alignment.query)
self.assertEqual(alignment.target.id, "chr1")
self.assertEqual(alignment.query.id, "hg19_dna")
self.assertTrue(
numpy.array_equal(
alignment.coordinates,
# fmt: off
# flake8: noqa
numpy.array([[220325687, 220325721],
[ 34, 0]]),
# fmt: on
)
)
self.assertRaises(StopIteration, next, alignments)
def test_writing_psl_34_005(self):
"""Test writing the alignments in psl_34_005.bed."""
path = "Blat/psl_34_005.bed"
with open(path) as stream:
original_data = stream.read()
alignments = bed.AlignmentIterator(path)
stream = StringIO()
writer = bed.AlignmentWriter(stream)
n = writer.write_file(alignments, mincount=22, maxcount=22)
self.assertEqual(n, 22)
stream.seek(0)
written_data = stream.read()
stream.close()
self.assertEqual(original_data, written_data)
class TestAlign_dnax_prot(unittest.TestCase):
    """Read/write tests for the BED files psl_35_001.bed and psl_35_002.bed."""
    def test_reading_psl_35_001(self):
        """Test parsing psl_35_001.bed."""
        path = "Blat/psl_35_001.bed"
        alignments = bed.AlignmentIterator(path)
        # First alignment: score 1000 on chr13.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75566694, 75566850],
                             [ 0, 156]]),
                # fmt: on
            )
        )
        # Second alignment.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75560749, 75560881],
                             [ 0, 132]]),
                # fmt: on
            )
        )
        # Third alignment: two aligned blocks separated by a gap.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 986)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75549820, 75549865, 75567225, 75567312],
                             [ 0, 45, 45, 132]]),
                # fmt: on
            )
        )
        # Fourth alignment.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75604767, 75604827, 75605728, 75605809],
                             [ 0, 60, 60, 141]]),
                # fmt: on
            )
        )
        # Fifth alignment.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75594914, 75594989],
                             [ 0, 75]]),
                # fmt: on
            )
        )
        # Sixth alignment.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 1000)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr13")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[75569459, 75569507],
                             [ 0, 48]]),
                # fmt: on
            )
        )
        # Seventh alignment: lower-scoring hit on chr4.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 530)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr4")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[41260685, 41260787],
                             [ 0, 102]]),
                # fmt: on
            )
        )
        # Eighth (last) alignment.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 166)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "chr4")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[41257605, 41257731, 41263227, 41263290],
                             [ 0, 126, 126, 189]]),
                # fmt: on
            )
        )
        # The file contains exactly eight alignments.
        self.assertRaises(StopIteration, next, alignments)
    def test_writing_psl_35_001(self):
        """Test writing the alignments in psl_35_001.bed."""
        path = "Blat/psl_35_001.bed"
        with open(path) as stream:
            original_data = stream.read()
        alignments = bed.AlignmentIterator(path)
        stream = StringIO()
        writer = bed.AlignmentWriter(stream)
        n = writer.write_file(alignments, mincount=8, maxcount=8)
        self.assertEqual(n, 8)
        stream.seek(0)
        written_data = stream.read()
        stream.close()
        # Round trip must reproduce the input file byte for byte.
        self.assertEqual(original_data, written_data)
    def test_reading_psl_35_002(self):
        """Test parsing psl_35_002.bed."""
        path = "Blat/psl_35_002.bed"
        alignments = bed.AlignmentIterator(path)
        # First alignment: seven aligned blocks on KI537979.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 972)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "KI537979")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[9712654, 9712786, 9715941, 9716097, 9716445, 9716532, 9718374,
                              9718422, 9739264, 9739339, 9743706, 9743766, 9744511, 9744592],
                             [ 0, 132, 132, 288, 288, 375, 375,
                              423, 423, 498, 498, 558, 558, 639]]),
                # fmt: on
            )
        )
        # Second alignment.
        alignment = next(alignments)
        self.assertEqual(alignment.score, 792)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertLess(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "KI538594")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[2103463, 2103523, 2103522, 2104149],
                             [ 0, 60, 60, 687]]),
                # fmt: on
            )
        )
        # Third alignment: query mapped to the reverse strand, so its
        # coordinates run high-to-low (hence assertGreater below).
        alignment = next(alignments)
        self.assertEqual(alignment.score, 902)
        self.assertLess(alignment.coordinates[0, 0], alignment.coordinates[0, -1])
        self.assertGreater(alignment.coordinates[1, 0], alignment.coordinates[1, -1])
        self.assertEqual(len(alignment), 2)
        self.assertIs(alignment.sequences[0], alignment.target)
        self.assertIs(alignment.sequences[1], alignment.query)
        self.assertEqual(alignment.target.id, "KI537194")
        self.assertEqual(alignment.query.id, "CAG33136.1")
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                # fmt: off
                # flake8: noqa
                numpy.array([[20872390, 20872471, 20872472, 20873021],
                             [ 630, 549, 549, 0]]),
                # fmt: on
            )
        )
        # The file contains exactly three alignments.
        self.assertRaises(StopIteration, next, alignments)
    def test_writing_psl_35_002(self):
        """Test writing the alignments in psl_35_002.bed."""
        path = "Blat/psl_35_002.bed"
        with open(path) as stream:
            original_data = stream.read()
        alignments = bed.AlignmentIterator(path)
        stream = StringIO()
        writer = bed.AlignmentWriter(stream)
        n = writer.write_file(alignments, mincount=3, maxcount=3)
        self.assertEqual(n, 3)
        stream.seek(0)
        written_data = stream.read()
        stream.close()
        # Round trip must reproduce the input file byte for byte.
        self.assertEqual(original_data, written_data)
class TestAlign_bed12(unittest.TestCase):
    """Round-trip tests for the reduced BED column counts (BED3 .. BED12)."""
    def test_reading(self):
        """Test parsing alignments in file formats BED3 through BED12."""
        for bedN in (3, 4, 5, 6, 7, 8, 9, 12):
            filename = "bed%d.bed" % bedN
            path = os.path.join("Blat", filename)
            alignments = bed.AlignmentIterator(path)
            # First alignment: mRNA1 on the forward strand of chr22.
            alignment = next(alignments)
            if bedN >= 5:
                self.assertEqual(alignment.score, 960, msg=filename)
            self.assertEqual(alignment.shape, (2, 4000), msg=filename)
            self.assertLess(
                alignment.coordinates[0, 0], alignment.coordinates[0, -1], msg=filename
            )
            self.assertLess(
                alignment.coordinates[1, 0], alignment.coordinates[1, -1], msg=filename
            )
            self.assertEqual(len(alignment), 2, msg=filename)
            self.assertIs(alignment.sequences[0], alignment.target, msg=filename)
            self.assertIs(alignment.sequences[1], alignment.query, msg=filename)
            self.assertEqual(alignment.target.id, "chr22", msg=filename)
            if bedN >= 4:
                self.assertEqual(alignment.query.id, "mRNA1", msg=filename)
            else:
                self.assertIsNone(alignment.query.id, msg=filename)
            if bedN == 12:
                # Only BED12 carries block sizes/starts, so only there do we
                # see the two exon blocks.
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        # fmt: off
                        # flake8: noqa
                        numpy.array([[1000, 1567, 4512, 5000],
                                     [ 0, 567, 567, 1055]]),
                        # fmt: on
                    ),
                    msg=filename,
                )
            else:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        numpy.array([[1000, 5000], [0, 4000]]),
                    ),
                    msg=filename,
                )
            if bedN >= 7:
                self.assertEqual(alignment.thickStart, 1200, msg=filename)
            if bedN >= 8:
                self.assertEqual(alignment.thickEnd, 5900, msg=filename)
            if bedN >= 9:
                self.assertEqual(alignment.itemRgb, "255,0,0", msg=filename)
            # Second alignment: mRNA2; from BED6 on the strand column makes it
            # a reverse-strand alignment.
            alignment = next(alignments)
            if bedN >= 5:
                self.assertEqual(alignment.score, 900, msg=filename)
            self.assertEqual(alignment.shape, (2, 4000), msg=filename)
            self.assertLess(
                alignment.coordinates[0, 0], alignment.coordinates[0, -1], msg=filename
            )
            if bedN >= 6:
                self.assertGreater(
                    alignment.coordinates[1, 0],
                    alignment.coordinates[1, -1],
                    msg=filename,
                )
            else:
                self.assertLess(
                    alignment.coordinates[1, 0],
                    alignment.coordinates[1, -1],
                    msg=filename,
                )
            self.assertEqual(len(alignment), 2, msg=filename)
            self.assertIs(alignment.sequences[0], alignment.target, msg=filename)
            self.assertIs(alignment.sequences[1], alignment.query, msg=filename)
            self.assertEqual(alignment.target.id, "chr22", msg=filename)
            if bedN >= 4:
                self.assertEqual(alignment.query.id, "mRNA2", msg=filename)
            else:
                self.assertIsNone(alignment.query.id, msg=filename)
            if bedN == 12:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        # fmt: off
                        # flake8: noqa
                        numpy.array([[2000, 2433, 5601, 6000],
                                     [ 832, 399, 399, 0]])
                        # fmt: on
                    ),
                    msg=filename,
                )
            elif bedN >= 6:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        numpy.array([[2000, 6000], [4000, 0]]),
                    ),
                    msg=filename,
                )
            else:
                self.assertTrue(
                    numpy.array_equal(
                        alignment.coordinates,
                        numpy.array([[2000, 6000], [0, 4000]]),
                    ),
                    msg=filename,
                )
            if bedN >= 7:
                self.assertEqual(alignment.thickStart, 2300, msg=filename)
            if bedN >= 8:
                self.assertEqual(alignment.thickEnd, 5960, msg=filename)
            if bedN >= 9:
                self.assertEqual(alignment.itemRgb, "0,255,0", msg=filename)
            # Each file must contain exactly two alignments; a third next()
            # has to exhaust the iterator. (The original code placed a
            # self.fail() with a broken, placeholder-free f-string inside the
            # with-block and bound an unused "cm"; msg=filename gives the
            # same diagnostic cleanly.)
            with self.assertRaises(StopIteration, msg=filename):
                next(alignments)
    def test_writing(self):
        """Test writing the alignments in bed12.bed as BED3 through BED12."""
        for bedN in (3, 4, 5, 6, 7, 8, 9, 12):
            filename = "bed%d.bed" % bedN
            path = os.path.join("Blat", filename)
            with open(path) as stream:
                original_data = stream.read()
            alignments = bed.AlignmentIterator(path)
            stream = StringIO()
            writer = bed.AlignmentWriter(stream, bedN=bedN)
            n = writer.write_file(alignments, mincount=2, maxcount=2)
            self.assertEqual(n, 2, msg=filename)
            stream.seek(0)
            written_data = stream.read()
            stream.close()
            # Round trip must reproduce the input file byte for byte.
            self.assertEqual(original_data, written_data, msg=filename)
if __name__ == "__main__":
    # Run the full suite with verbose, per-test reporting.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
sgalpha01/biopython | Bio/PDB/PDBIO.py | <reponame>sgalpha01/biopython<gh_stars>0
# Copyright (C) 2002, <NAME> (<EMAIL>)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Output of PDB files."""
import warnings
# Exceptions and Warnings
from Bio import BiopythonWarning
from Bio.PDB.PDBExceptions import PDBIOException
# To allow saving of chains, residues, etc..
from Bio.PDB.StructureBuilder import StructureBuilder
# Allowed Elements
from Bio.Data.IUPACData import atom_weights
# printf-style template for one ATOM/HETATM record in PDB flavour:
# record name, serial, atom name, altloc, resname, chain, resseq, icode,
# x/y/z coordinates, occupancy, B-factor, segid, element, charge.
_ATOM_FORMAT_STRING = (
    "%s%5i %-4s%c%3s %c%4i%c %8.3f%8.3f%8.3f%s%6.2f %4s%2s%2s\n"
)
# printf-style template for one ATOM/HETATM record in PQR flavour:
# charge and radius replace the occupancy/B-factor columns.
_PQR_ATOM_FORMAT_STRING = (
    "%s%5i %-4s%c%3s %c%4i%c %8.3f%8.3f%8.3f %7s %6s %2s\n"
)
# printf-style template for the TER record that closes each written chain.
_TER_FORMAT_STRING = (
    "TER %5i %3s %c%4i%c \n"
)
class Select:
    """Accept every entity for PDB output (intended for subclassing).
    Override any of the ``accept_*`` hooks in a subclass to filter which
    models, chains, residues or atoms are written out; each default
    implementation accepts everything by returning 1.
    """
    def __repr__(self):
        """Return a short debugging representation."""
        return "<Select all>"
    def accept_model(self, model):
        """Return 1 to include *model* in the output; override to filter."""
        return 1
    def accept_chain(self, chain):
        """Return 1 to include *chain* in the output; override to filter."""
        return 1
    def accept_residue(self, residue):
        """Return 1 to include *residue* in the output; override to filter."""
        return 1
    def accept_atom(self, atom):
        """Return 1 to include *atom* in the output; override to filter."""
        return 1
# Module-level default selector: writes the complete structure.
_select = Select()
class StructureIO:
    """Base class to derive structure file format writers from."""
    def __init__(self):
        """Initialise."""
        pass
    def set_structure(self, pdb_object):
        """Check what the user is providing and build a structure.
        Accepts any SMCRA level (Structure, Model, Chain, Residue or Atom)
        and stores a full Structure in ``self.structure``, synthesising the
        missing upstream levels when needed.
        """
        # The idea here is to build missing upstream components of
        # the SMCRA object representation. E.g., if the user provides
        # a Residue, build Structure/Model/Chain.
        if pdb_object.level == "S":
            structure = pdb_object
        else: # Not a Structure
            sb = StructureBuilder()
            sb.init_structure("pdb")
            sb.init_seg(" ")
            if pdb_object.level == "M":
                sb.structure.add(pdb_object.copy())
                # Copies are added so the caller's hierarchy is left untouched.
                self.structure = sb.structure
            else: # Not a Model
                sb.init_model(0)
                if pdb_object.level == "C":
                    sb.structure[0].add(pdb_object.copy())
                else: # Not a Chain
                    chain_id = "A" # default
                    sb.init_chain(chain_id)
                    if pdb_object.level == "R": # Residue
                        # Residue extracted from a larger structure?
                        if pdb_object.parent is not None:
                            # Keep the residue's original chain identifier.
                            og_chain_id = pdb_object.parent.id
                            sb.structure[0][chain_id].id = og_chain_id
                            chain_id = og_chain_id
                        sb.structure[0][chain_id].add(pdb_object.copy())
                    else: # Atom
                        sb.init_residue("DUM", " ", 1, " ") # Dummy residue
                        sb.structure[0][chain_id].child_list[0].add(pdb_object.copy())
                        # Fix chain identifier if Atom has grandparents.
                        try:
                            og_chain_id = pdb_object.parent.parent.id
                        except AttributeError: # pdb_object.parent == None
                            pass
                        else:
                            sb.structure[0][chain_id].id = og_chain_id
                # Return structure
                structure = sb.structure
        # For Structure input this is the object itself; otherwise the
        # freshly built wrapper structure.
        self.structure = structure
class PDBIO(StructureIO):
    """Write a Structure object (or a subset of a Structure object) as a PDB or PQR file.
    Examples
    --------
    >>> from Bio.PDB import PDBParser
    >>> from Bio.PDB.PDBIO import PDBIO
    >>> parser = PDBParser()
    >>> structure = parser.get_structure("1a8o", "PDB/1A8O.pdb")
    >>> io=PDBIO()
    >>> io.set_structure(structure)
    >>> io.save("bio-pdb-pdbio-out.pdb")
    >>> import os
    >>> os.remove("bio-pdb-pdbio-out.pdb") # tidy up
    """
    def __init__(self, use_model_flag=0, is_pqr=False):
        """Create the PDBIO object.
        :param use_model_flag: if 1, force use of the MODEL record in output.
        :type use_model_flag: int
        :param is_pqr: if True, build PQR file. Otherwise build PDB file.
        :type is_pqr: Boolean
        """
        self.use_model_flag = use_model_flag
        self.is_pqr = is_pqr
    # private methods
    def _get_atom_line(
        self,
        atom,
        hetfield,
        segid,
        atom_number,
        resname,
        resseq,
        icode,
        chain_id,
        charge=" ",
    ):
        """Return an ATOM PDB string (PRIVATE).
        Formats one atom into a fixed-width ATOM/HETATM record using
        ``_ATOM_FORMAT_STRING`` (PDB) or ``_PQR_ATOM_FORMAT_STRING`` (PQR),
        depending on ``self.is_pqr``.
        """
        # The hetero flag from the residue id decides the record type.
        if hetfield != " ":
            record_type = "HETATM"
        else:
            record_type = "ATOM "
        # Atom properties
        # Check if the atom serial number is an integer
        # Not always the case for structures built from
        # mmCIF files.
        try:
            atom_number = int(atom_number)
        except ValueError:
            raise ValueError(
                f"{atom_number!r} is not a number."
                "Atom serial numbers must be numerical"
                " If you are converting from an mmCIF"
                " structure, try using"
                " preserve_atom_numbering=False"
            )
        # PDB serial numbers are a fixed 5-column field.
        if atom_number > 99999:
            raise ValueError(
                f"Atom serial number ('{atom_number}') exceeds PDB format limit."
            )
        # Check if the element is valid, unknown (X), or blank
        if atom.element:
            element = atom.element.strip().upper()
            if element.capitalize() not in atom_weights and element != "X":
                raise ValueError(f"Unrecognised element {atom.element}")
            element = element.rjust(2)
        else:
            element = " "
        # Format atom name
        # Pad if:
        # - smaller than 4 characters
        # AND - is not C, N, O, S, H, F, P, ..., one letter elements
        # AND - first character is NOT numeric (funky hydrogen naming rules)
        name = atom.fullname.strip()
        if len(name) < 4 and name[:1].isalpha() and len(element.strip()) < 2:
            name = " " + name
        altloc = atom.altloc
        x, y, z = atom.coord
        # Write PDB format line
        if not self.is_pqr:
            bfactor = atom.bfactor
            try:
                occupancy = f"{atom.occupancy:6.2f}"
            except (TypeError, ValueError):
                # A None occupancy is tolerated (blank field + warning);
                # anything else non-numeric is an error.
                if atom.occupancy is None:
                    occupancy = " " * 6
                    warnings.warn(
                        f"Missing occupancy in atom {atom.full_id!r} written as blank",
                        BiopythonWarning,
                    )
                else:
                    raise ValueError(
                        f"Invalid occupancy value: {atom.occupancy!r}"
                    ) from None
            args = (
                record_type,
                atom_number,
                name,
                altloc,
                resname,
                chain_id,
                resseq,
                icode,
                x,
                y,
                z,
                occupancy,
                bfactor,
                segid,
                element,
                charge,
            )
            return _ATOM_FORMAT_STRING % args
        # Write PQR format line
        else:
            try:
                pqr_charge = f"{atom.pqr_charge:7.4f}"
            except (TypeError, ValueError):
                if atom.pqr_charge is None:
                    pqr_charge = " " * 7
                    warnings.warn(
                        f"Missing PQR charge in atom {atom.full_id} written as blank",
                        BiopythonWarning,
                    )
                else:
                    raise ValueError(
                        f"Invalid PQR charge value: {atom.pqr_charge!r}"
                    ) from None
            try:
                radius = f"{atom.radius:6.4f}"
            except (TypeError, ValueError):
                if atom.radius is None:
                    radius = " " * 6
                    warnings.warn(
                        f"Missing radius in atom {atom.full_id} written as blank",
                        BiopythonWarning,
                    )
                else:
                    raise ValueError(f"Invalid radius value: {atom.radius}") from None
            args = (
                record_type,
                atom_number,
                name,
                altloc,
                resname,
                chain_id,
                resseq,
                icode,
                x,
                y,
                z,
                pqr_charge,
                radius,
                element,
            )
            return _PQR_ATOM_FORMAT_STRING % args
    # Public methods
    def save(self, file, select=_select, write_end=True, preserve_atom_numbering=False):
        """Save structure to a file.
        :param file: output file
        :type file: string or filehandle
        :param select: selects which entities will be written.
        :type select: object
        :param write_end: if True, write an END record after the last model.
        :type write_end: bool
        :param preserve_atom_numbering: if True, keep the atom serial numbers
            from the input structure; otherwise atoms are renumbered from 1
            per model.
        :type preserve_atom_numbering: bool
        Typically select is a subclass of L{Select}, it should
        have the following methods:
        - accept_model(model)
        - accept_chain(chain)
        - accept_residue(residue)
        - accept_atom(atom)
        These methods should return 1 if the entity is to be
        written out, 0 otherwise.
        Typically select is a subclass of L{Select}.
        """
        if isinstance(file, str):
            fhandle = open(file, "w")
        else:
            # filehandle, I hope :-)
            fhandle = file
        # NOTE(review): the with-block closes the handle on exit even when the
        # caller passed it in — confirm callers expect this.
        with fhandle:
            get_atom_line = self._get_atom_line
            # multiple models?
            if len(self.structure) > 1 or self.use_model_flag:
                model_flag = 1
            else:
                model_flag = 0
            for model in self.structure.get_list():
                if not select.accept_model(model):
                    continue
                # necessary for ENDMDL
                # do not write ENDMDL if no residues were written
                # for this model
                model_residues_written = 0
                if not preserve_atom_numbering:
                    atom_number = 1
                if model_flag:
                    fhandle.write(f"MODEL {model.serial_num}\n")
                for chain in model.get_list():
                    if not select.accept_chain(chain):
                        continue
                    chain_id = chain.id
                    if len(chain_id) > 1:
                        e = f"Chain id ('{chain_id}') exceeds PDB format limit."
                        raise PDBIOException(e)
                    # necessary for TER
                    # do not write TER if no residues were written
                    # for this chain
                    chain_residues_written = 0
                    for residue in chain.get_unpacked_list():
                        if not select.accept_residue(residue):
                            continue
                        hetfield, resseq, icode = residue.id
                        resname = residue.resname
                        segid = residue.segid
                        resid = residue.id[1]
                        if resid > 9999:
                            e = f"Residue number ('{resid}') exceeds PDB format limit."
                            raise PDBIOException(e)
                        for atom in residue.get_unpacked_list():
                            if not select.accept_atom(atom):
                                continue
                            chain_residues_written = 1
                            model_residues_written = 1
                            if preserve_atom_numbering:
                                atom_number = atom.serial_number
                            try:
                                s = get_atom_line(
                                    atom,
                                    hetfield,
                                    segid,
                                    atom_number,
                                    resname,
                                    resseq,
                                    icode,
                                    chain_id,
                                )
                            except Exception as err:
                                # catch and re-raise with more information
                                raise PDBIOException(
                                    f"Error when writing atom {atom.full_id}"
                                ) from err
                            else:
                                fhandle.write(s)
                                # inconsequential if preserve_atom_numbering is True
                                atom_number += 1
                    if chain_residues_written:
                        # TER record uses the last residue written for the chain.
                        fhandle.write(
                            _TER_FORMAT_STRING
                            % (atom_number, resname, chain_id, resseq, icode)
                        )
                if model_flag and model_residues_written:
                    fhandle.write("ENDMDL\n")
            if write_end:
                fhandle.write("END \n")
|
sgalpha01/biopython | Tests/test_Align_msf.py | # Copyright 2021 by <NAME>. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.Align.msf module."""
import unittest
import warnings
from Bio import BiopythonParserWarning
from Bio import BiopythonExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonExperimentalWarning)
from Bio.Align.msf import AlignmentIterator
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install numpy if you want to use Bio.Align.msf."
) from None
class TestMSF(unittest.TestCase):
    """Tests for Bio.Align.msf.AlignmentIterator on GCG MSF protein files."""
    def test_protein1(self):
        """Test parsing the protein alignment in msf/W_prot.msf."""
        path = "msf/W_prot.msf"
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            alignments = list(alignments)
        # One alignment of eleven 99-column rows.
        self.assertEqual(len(alignments), 1)
        alignment = alignments[0]
        self.assertEqual(len(alignment), 11)
        self.assertEqual(alignment.shape, (11, 99))
        self.assertEqual(alignment.sequences[0].id, "W*01:01:01:01")
        self.assertEqual(alignment.sequences[1].id, "W*01:01:01:02")
        self.assertEqual(alignment.sequences[2].id, "W*01:01:01:03")
        self.assertEqual(alignment.sequences[3].id, "W*01:01:01:04")
        self.assertEqual(alignment.sequences[4].id, "W*01:01:01:05")
        self.assertEqual(alignment.sequences[5].id, "W*01:01:01:06")
        self.assertEqual(alignment.sequences[6].id, "W*02:01")
        self.assertEqual(alignment.sequences[7].id, "W*03:01:01:01")
        self.assertEqual(alignment.sequences[8].id, "W*03:01:01:02")
        self.assertEqual(alignment.sequences[9].id, "W*04:01")
        self.assertEqual(alignment.sequences[10].id, "W*05:01")
        self.assertEqual(
            alignment.sequences[0].seq,
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment.sequences[1].seq,
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment.sequences[2].seq,
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment.sequences[3].seq,
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment.sequences[4].seq,
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment.sequences[5].seq,
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment.sequences[6].seq,
            "GLTPSNGYTAATWTRTAASSVGMNIPYDGASYLVRNQELRSWTAADKAAQMPWRRNMQSCSKPTCREGGRSGSAKSLRMGRRRCTAQNPKRLT",
        )
        self.assertEqual(
            alignment.sequences[7].seq,
            "GLTPSSGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKRLT",
        )
        self.assertEqual(
            alignment.sequences[8].seq,
            "GLTPSSGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKRLT",
        )
        self.assertEqual(
            alignment.sequences[9].seq,
            "GLTPSNGYTAATWTRTAASSVGMNIPYDGASYLVRNQELRSWTAADKAAQMPWRRNMQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKRLT",
        )
        self.assertEqual(
            alignment.sequences[10].seq,
            "GLTPSSGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        # Rows whose sequence stops at column 93 end in a gap block (93, 93).
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                numpy.array(
                    [
                        [0, 93, 99],
                        [0, 93, 99],
                        [0, 93, 99],
                        [0, 93, 99],
                        [0, 93, 99],
                        [0, 93, 99],
                        [0, 93, 93],
                        [0, 93, 93],
                        [0, 93, 93],
                        [0, 93, 93],
                        [0, 93, 99],
                    ]
                ),
            )
        )
        self.assertEqual(
            alignment[0],
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment[1],
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment[2],
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment[3],
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment[4],
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment[5],
            "GLTPFNGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
        self.assertEqual(
            alignment[6],
            "GLTPSNGYTAATWTRTAASSVGMNIPYDGASYLVRNQELRSWTAADKAAQMPWRRNMQSCSKPTCREGGRSGSAKSLRMGRRRCTAQNPKRLT------",
        )
        self.assertEqual(
            alignment[7],
            "GLTPSSGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKRLT------",
        )
        self.assertEqual(
            alignment[8],
            "GLTPSSGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKRLT------",
        )
        self.assertEqual(
            alignment[9],
            "GLTPSNGYTAATWTRTAASSVGMNIPYDGASYLVRNQELRSWTAADKAAQMPWRRNMQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKRLT------",
        )
        self.assertEqual(
            alignment[10],
            "GLTPSSGYTAATWTRTAVSSVGMNIPYHGASYLVRNQELRSWTAADKAAQMPWRRNRQSCSKPTCREGGRSGSAKSLRMGRRGCSAQNPKDSHDPPPHL",
        )
    def test_protein2(self):
        """Test parsing msf/DOA_prot.msf; its bad header length triggers a warning."""
        path = "msf/DOA_prot.msf"
        with warnings.catch_warnings(record=True) as w:
            with open(path) as stream:
                alignments = AlignmentIterator(stream)
                alignments = list(alignments)
        # The MSF header declares length 62 but the data are 250 columns wide.
        self.assertEqual(len(w), 1)
        self.assertIsInstance(w[0].message, BiopythonParserWarning)
        self.assertEqual(
            str(w[0].message), "GCG MSF headers said alignment length 62, but found 250"
        )
        self.assertEqual(len(alignments), 1)
        alignment = alignments[0]
        self.assertEqual(len(alignment), 12)
        self.assertEqual(alignment.shape, (12, 250))
        self.assertEqual(alignment.sequences[0].id, "DOA*01:01:01")
        self.assertEqual(alignment.sequences[1].id, "DOA*01:01:02:01")
        self.assertEqual(alignment.sequences[2].id, "DOA*01:01:02:02")
        self.assertEqual(alignment.sequences[3].id, "DOA*01:01:02:03")
        self.assertEqual(alignment.sequences[4].id, "DOA*01:01:03")
        self.assertEqual(alignment.sequences[5].id, "DOA*01:01:04:01")
        self.assertEqual(alignment.sequences[6].id, "DOA*01:01:04:02")
        self.assertEqual(alignment.sequences[7].id, "DOA*01:01:05")
        self.assertEqual(alignment.sequences[8].id, "DOA*01:01:06")
        self.assertEqual(alignment.sequences[9].id, "DOA*01:02")
        self.assertEqual(alignment.sequences[10].id, "DOA*01:03")
        self.assertEqual(alignment.sequences[11].id, "DOA*01:04N")
        self.assertEqual(
            alignment.sequences[0].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[1].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[2].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[3].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[4].seq,
            "DHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[5].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[6].seq,
            "DHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[7].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[8].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[9].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNCSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[10].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDIVVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment.sequences[11].seq,
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPPSTSLTAPRASSPMNLMRNSCSLWTX",
        )
        # Rows 4 and 6 start 28 columns in; row 11 (a null allele) stops at 62.
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates,
                numpy.array(
                    [
                        [0, 28, 62, 250],
                        [0, 28, 62, 250],
                        [0, 28, 62, 250],
                        [0, 28, 62, 250],
                        [0, 0, 34, 222],
                        [0, 28, 62, 250],
                        [0, 0, 34, 222],
                        [0, 28, 62, 250],
                        [0, 28, 62, 250],
                        [0, 28, 62, 250],
                        [0, 28, 62, 250],
                        [0, 28, 62, 62],
                    ],
                ),
            )
        )
        self.assertEqual(
            alignment[0],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[1],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[2],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[3],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[4],
            "----------------------------DHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[5],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[6],
            "----------------------------DHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[7],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[8],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[9],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDILVERSNCSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[10],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPAFYQSYGASGQFTHEFDEEQLFSVDLKKSEAVWRLPEFGDFARFDPQGGLAGIAAIKAHLDIVVERSNRSRAINVPPRVTVLPKSRVELGQPNILICIVDNIFPPVINITWLRNGQTVTEGVAQTSFYSQPDHLFRKFHYLPFVPSAEDVYDCQVEHWGLDAPLLRHWELQVPIPPPDAMETLVCALGLAIGLVGFLVGTVLIIMGTYVSSVPR",
        )
        self.assertEqual(
            alignment[11],
            "MALRAGLVLGFHTLMTLLSPQEAGATKADHMGSYGPPSTSLTAPRASSPMNLMRNSCSLWTX--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------",
        )
if __name__ == "__main__":
    # Run the full suite with verbose, per-test reporting.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
sgalpha01/biopython | Bio/Align/bed.py | # Copyright 2022 by <NAME>. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.Align support for BED (Browser Extensible Data) files.
The Browser Extensible Data (BED) format, stores a series of pairwise
alignments in a single file. Typically they are used for transcript to genome
alignments. BED files store the alignment positions and alignment scores, but
not the aligned sequences.
See http://genome.ucsc.edu/FAQ/FAQformat.html#format1
You are expected to use this module via the Bio.Align functions.
Coordinates in the BED format are defined in terms of zero-based start
positions (like Python) and aligning region sizes.
A minimal aligned region of length one and starting at first position in the
source sequence would have ``start == 0`` and ``size == 1``.
As we can see in this example, ``start + size`` will give one more than the
zero-based end position. We can therefore manipulate ``start`` and
``start + size`` as python list slice boundaries.
"""
import numpy
from Bio.Align import Alignment
from Bio.Align import interfaces
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import BiopythonExperimentalWarning
import warnings
# Emit a one-time, import-time warning: this module's API is experimental
# and may change before its official release.
warnings.warn(
    "Bio.Align.bed is an experimental module which may undergo "
    "significant changes prior to its future official release.",
    BiopythonExperimentalWarning,
)
class AlignmentWriter(interfaces.AlignmentWriter):
    """Alignment file writer for the Browser Extensible Data (BED) file format."""

    def __init__(self, target, bedN=12):
        """Create an AlignmentWriter object.

        Arguments:
         - target - output stream or file name
         - bedN - number of columns in the BED file.
                  This must be between 3 and 12; default value is 12.

        Raises a ValueError if bedN is outside the range 3..12.
        """
        if bedN < 3 or bedN > 12:
            raise ValueError("bedN must be between 3 and 12")
        super().__init__(target, mode="w")
        self.bedN = bedN

    def format_alignment(self, alignment):
        """Return a string with one alignment formatted as a BED line.

        Only the first ``self.bedN`` BED columns are written.  The score,
        thickStart, thickEnd, and itemRgb columns are taken from the
        equally-named attributes of ``alignment`` when present; otherwise
        default values are used.

        Raises a TypeError if ``alignment`` is not an Alignment object.
        """
        if not isinstance(alignment, Alignment):
            raise TypeError("Expected an Alignment object")
        coordinates = alignment.coordinates
        if not coordinates.size:  # alignment consists of gaps only
            return ""
        bedN = self.bedN
        target, query = alignment.sequences
        try:
            chrom = target.id
        except AttributeError:
            chrom = "target"
        # In BED, the target (chromosome) is always on the forward strand.
        assert coordinates[0, 0] < coordinates[0, -1]
        if coordinates[1, 0] > coordinates[1, -1]:
            # DNA/RNA mapped to reverse strand of DNA/RNA
            strand = "-"
        else:
            # mapped to forward strand
            strand = "+"
        # variable names follow those in the BED file format specification
        blockSizes = []
        blockStarts = []
        tStart, qStart = coordinates[:, 0]
        for tEnd, qEnd in coordinates[:, 1:].transpose():
            if tStart == tEnd:
                # gap in the target: advance the query position only
                qStart = qEnd
            elif qStart == qEnd:
                # gap in the query: advance the target position only
                tStart = tEnd
            else:
                # an aligned block; record its target start and size
                blockStarts.append(tStart)
                blockSizes.append(tEnd - tStart)
                tStart = tEnd
                qStart = qEnd
        if not blockStarts:
            # degenerate alignment with no aligned blocks (e.g. only gaps
            # on one side); treat it like the gaps-only case above
            return ""
        chromStart = blockStarts[0]  # start of alignment in target
        # Use the last recorded block explicitly instead of relying on the
        # loop variable leaking out of the for loop above.
        chromEnd = blockStarts[-1] + blockSizes[-1]  # end of alignment in target
        fields = [chrom, str(chromStart), str(chromEnd)]
        if bedN == 3:
            return "\t".join(fields) + "\n"
        try:
            name = query.id
        except AttributeError:
            name = "query"
        fields.append(name)
        if bedN == 4:
            return "\t".join(fields) + "\n"
        try:
            score = alignment.score
        except AttributeError:
            score = 0
        fields.append(str(score))
        if bedN == 5:
            return "\t".join(fields) + "\n"
        fields.append(strand)
        if bedN == 6:
            return "\t".join(fields) + "\n"
        try:
            thickStart = alignment.thickStart
        except AttributeError:
            thickStart = chromStart
        fields.append(str(thickStart))
        if bedN == 7:
            return "\t".join(fields) + "\n"
        try:
            thickEnd = alignment.thickEnd
        except AttributeError:
            thickEnd = chromEnd
        fields.append(str(thickEnd))
        if bedN == 8:
            return "\t".join(fields) + "\n"
        try:
            itemRgb = alignment.itemRgb
        except AttributeError:
            itemRgb = "0"
        fields.append(str(itemRgb))
        if bedN == 9:
            return "\t".join(fields) + "\n"
        blockCount = len(blockSizes)
        fields.append(str(blockCount))
        if bedN == 10:
            return "\t".join(fields) + "\n"
        fields.append(",".join(map(str, blockSizes)) + ",")
        if bedN == 11:
            return "\t".join(fields) + "\n"
        # BED stores block starts relative to chromStart.  Subtract the
        # offset per element; the previous in-place "blockStarts -= chromStart"
        # raised a TypeError because blockStarts is a plain Python list.
        blockStarts = [blockStart - chromStart for blockStart in blockStarts]
        fields.append(",".join(map(str, blockStarts)) + ",")
        return "\t".join(fields) + "\n"
class AlignmentIterator(interfaces.AlignmentIterator):
    """Alignment iterator for Browser Extensible Data (BED) files.

    Each line in the file contains one pairwise alignment, which are loaded
    and returned incrementally. Additional alignment information (score,
    thickStart, thickEnd, itemRgb) is stored as attributes of each alignment.
    """

    def __init__(self, source):
        """Create an AlignmentIterator object.

        Arguments:
         - source - input data or file name
        """
        super().__init__(source, mode="t", fmt="BED")

    def parse(self, stream):
        """Parse the next alignment from the stream.

        Yields one Alignment per BED line; raises ValueError on lines with
        an invalid column count or inconsistent block information.
        """
        if stream is None:
            raise StopIteration
        for line in stream:
            words = line.split()
            bedN = len(words)
            if bedN < 3 or bedN > 12:
                raise ValueError("expected between 3 and 12 columns, found %d" % bedN)
            # Mandatory columns: chromosome name and zero-based half-open
            # start/end coordinates on the target.
            chrom = words[0]
            chromStart = int(words[1])
            chromEnd = int(words[2])
            if bedN > 3:
                name = words[3]
            else:
                name = None
            if bedN > 5:
                strand = words[5]
            else:
                strand = "+"
            if bedN > 9:
                # Columns 10-12 describe the aligned blocks; the trailing
                # comma allowed by the BED format is stripped before splitting.
                blockCount = int(words[9])
                blockSizes = [
                    int(blockSize) for blockSize in words[10].rstrip(",").split(",")
                ]
                blockStarts = [
                    int(blockStart) for blockStart in words[11].rstrip(",").split(",")
                ]
                if len(blockSizes) != blockCount:
                    raise ValueError(
                        "Inconsistent number of block sizes (%d found, expected %d)"
                        % (len(blockSizes), blockCount)
                    )
                if len(blockStarts) != blockCount:
                    raise ValueError(
                        "Inconsistent number of block start positions (%d found, expected %d)"
                        % (len(blockStarts), blockCount)
                    )
                blockSizes = numpy.array(blockSizes)
                blockStarts = numpy.array(blockStarts)
                # Build the coordinate pairs: a jump in the target position
                # between blocks is a gap in the query, so an extra point is
                # inserted before each non-contiguous block.
                tPosition = 0
                qPosition = 0
                coordinates = [[tPosition, qPosition]]
                for blockSize, blockStart in zip(blockSizes, blockStarts):
                    if blockStart != tPosition:
                        coordinates.append([blockStart, qPosition])
                        tPosition = blockStart
                    tPosition += blockSize
                    qPosition += blockSize
                    coordinates.append([tPosition, qPosition])
                coordinates = numpy.array(coordinates).transpose()
                qSize = sum(blockSizes)
            else:
                # No block information: the alignment is one gapless block.
                blockSize = chromEnd - chromStart
                coordinates = numpy.array([[0, blockSize], [0, blockSize]])
                qSize = blockSize
            # Shift target coordinates from block-relative to chromosome
            # coordinates.
            coordinates[0, :] += chromStart
            # The query sequence itself is unknown in BED; only its length is
            # recorded (Seq with contents None).
            query_sequence = Seq(None, length=qSize)
            query_record = SeqRecord(query_sequence, id=name)
            target_record = SeqRecord(None, id=chrom)
            records = [target_record, query_record]
            if strand == "-":
                # Flip query coordinates so they run along the reverse strand.
                coordinates[1, :] = qSize - coordinates[1, :]
            # Sanity checks: the block data must span exactly
            # [chromStart, chromEnd) on the target.
            if chromStart != coordinates[0, 0]:
                raise ValueError(
                    "Inconsistent chromStart found (%d, expected %d)"
                    % (chromStart, coordinates[0, 0])
                )
            if chromEnd != coordinates[0, -1]:
                raise ValueError(
                    "Inconsistent chromEnd found (%d, expected %d)"
                    % (chromEnd, coordinates[0, -1])
                )
            alignment = Alignment(records, coordinates)
            # Attach the optional columns present on this line, yielding as
            # soon as the available columns are exhausted.
            if bedN <= 4:
                yield alignment
                continue
            score = words[4]
            try:
                score = float(score)
            except ValueError:
                # non-numeric score: keep it as the original string
                pass
            else:
                if score.is_integer():
                    score = int(score)
                alignment.score = score
            if bedN <= 6:
                yield alignment
                continue
            alignment.thickStart = int(words[6])
            if bedN <= 7:
                yield alignment
                continue
            alignment.thickEnd = int(words[7])
            if bedN <= 8:
                yield alignment
                continue
            alignment.itemRgb = words[8]
            yield alignment
|
sgalpha01/biopython | Tests/test_Align_clustal.py | <reponame>sgalpha01/biopython<gh_stars>0
# Copyright 2006-2014 by <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.Align.clustal module."""
import unittest
import warnings
from io import StringIO
from Bio import BiopythonExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonExperimentalWarning)
from Bio.Align.clustal import AlignmentIterator
from Bio.Align.clustal import AlignmentWriter
class TestClustalReadingWriting(unittest.TestCase):
    """Tests for reading Clustal files and round-tripping them through the writer."""

    def check_reading_writing(self, path):
        """Write the alignment in ``path`` back out and verify it is unchanged."""
        alignments = AlignmentIterator(path)
        stream = StringIO()
        writer = AlignmentWriter(stream)
        n = writer.write_file(alignments, mincount=1, maxcount=1)
        self.assertEqual(n, 1)
        # Re-read the original file and the freshly written copy in parallel.
        alignments = AlignmentIterator(path)
        alignment = next(alignments)
        stream.seek(0)
        saved_alignments = AlignmentIterator(stream)
        self.assertEqual(saved_alignments.program, alignments.program)
        self.assertEqual(saved_alignments.version, alignments.version)
        saved_alignment = next(saved_alignments)
        with self.assertRaises(StopIteration):
            next(saved_alignments)
        self.assertEqual(len(alignment), len(saved_alignment))
        # Compare per-row: record ids, ungapped sequences, and aligned rows.
        for i, (sequence, saved_sequence) in enumerate(
            zip(alignment.sequences, saved_alignment.sequences)
        ):
            self.assertEqual(sequence.id, saved_sequence.id)
            self.assertEqual(sequence.seq, saved_sequence.seq)
            self.assertEqual(alignment[i], saved_alignment[i])

    def test_clustalw(self):
        """Parse a 2-row CLUSTAL 1.81 file and round-trip it."""
        path = "Clustalw/clustalw.aln"
        # includes the sequence length on the right hand side of each line
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            self.assertEqual(alignments.program, "CLUSTAL")
            self.assertEqual(alignments.version, "1.81")
            alignment = next(alignments)
            with self.assertRaises(StopIteration):
                next(alignments)
        self.assertEqual(
            repr(alignment),
            "<Bio.Align.Alignment object (2 rows x 601 columns) at 0x%x>"
            % id(alignment),
        )
        self.assertEqual(len(alignment), 2)
        self.assertEqual(alignment.sequences[0].id, "gi|4959044|gb|AAD34209.1|AF069")
        self.assertEqual(alignment.sequences[1].id, "gi|671626|emb|CAA85685.1|")
        # sequences[i].seq is the ungapped sequence; alignment[i] is the
        # gapped row as it appears in the alignment.
        self.assertEqual(
            alignment.sequences[0].seq,
            "MENSDSNDKGSDQSAAQRRSQMDRLDREEAFYQFVNNLSEEDYRLMRDNNLLGTPGESTEEELLRRLQQIKEGPPPQSPDENRAGESSDDVTNSDSIIDWLNSVRQTGNTTRSRQRGNQSWRAVSRTNPNSGDFRFSLEINVNRNNGSQTSENESEPSTRRLSVENMESSSQRQMENSASESASARPSRAERNSTEAVTEVPTTRAQRRARSRSPEHRRTRARAERSMSPLQPTSEIPRRAPTLEQSSENEPEGSSRTRHHVTLRQQISGPELLGRGLFAASGSRNPSQGTSSSDTGSNSESSGSGQRPPTIVLDLQVRRVRPGEYRQRDSIASRTRSRSQAPNNTVTYESERGGFRRTFSRSERAGVRTYVSTIRIPIRRILNTGLSETTSVAIQTMLRQIMTGFGELSYFMYSDSDSEPSASVSSRNVERVESRNGRGSSGGGNSSGSSSSSSPSPSSSGESSESSSKMFEGSSEGGSSGPSRKDGRHRAPVTFDESGSLPFFSLAQFFLLNEDDEDQPRGLTKEQIDNLAMRSFGENDALKTCSVCITEYTEGDKLRKLPCSHEFHVHCIDRWLSENSTCPICRRAVLSSGNRESVV",
        )
        self.assertEqual(
            alignment.sequences[1].seq,
            "MSPQTETKASVGFKAGVKEYKLTYYTPEYETKDTDILAAFRVTPQPGVPPEEAGAAVAAESSTGTWTTVWTD<KEY>SV<KEY>ETGEIKGHYLNATAGTCEEM<KEY>SRGIYFTQDWVSL<KEY>GGIHVWHMPALTEIFGDDSVLQFGGGTLGHPWGNAPGAVANRVAVEACVKARNEGRDLAAEGNAIIREACKWSPELAAACEVWKEIKFEFPAMD",
        )
        self.assertEqual(
            alignment[0],
            "MENSDSNDKGSDQSAAQRRSQMDRLDREEAFYQFVNNLSEEDYRLMRDNNLLGTPGESTEEELLRRLQQIKEGPPPQSPDENRAGESSDDVTNSDSIIDWLNSVRQTGNTTRSRQRGNQSWRAVSRTNPNSGDFRFSLEINVNRNNGSQTSENESEPSTRRLSVENMESSSQRQMENSASESASARPSRAERNSTEAVTEVPTTRAQRRARSRSPEHRRTRARAERSMSPLQPTSEIPRRAPTLEQSSENEPEGSSRTRHHVTLRQQISGPELLGRGLFAASGSRNPSQGTSSSDTGSNSESSGSGQRPPTIVLDLQVRRVRPGEYRQRDSIASRTRSRSQAPNNTVTYESERGGFRRTFSRSERAGVRTYVSTIRIPIRRILNTGLSETTSVAIQTMLRQIMTGFGELSYFMYSDSDSEPSASVSSRNVERVESRNGRGSSGGGNSSGSSSSSSPSPSSSGESSESSSKMFEGSSEGGSSGPSRKDGRHRAPVTFDESGSLPFFSLAQFFLLNEDDEDQPRGLTKEQIDNLAMRSFGENDALKTCSVCITEYTEGDKLRKLPCSHEFHVHCIDRWLSE-NSTCPICRRAVLSSGNRESVV",
        )
        self.assertEqual(
            alignment[1],
            "---------MSPQTETKASVGFKAGVKEYKLTYYTPEYETKDTDILAAFRVTPQPG-----------------VPPEEAGAAVAAESSTGT---------WTTVWTDGLTSLDRYKG-----RCYHIEPVPG-------------------EKDQCICYVAYPLDLFEEGSVTNMFTSIVGNVFGFKALRALRLEDLRIPVAYVKTFQGPPHGIQVERDKLNKYGRPLLGCTIKPKLGLSAKNYGRAVYECLRGGLDFTKDDENVNSQPFMRWRDRFLFCAEAIYKAQAETGEIKGHYLNATAG-----------------------TCEEMIKRAIFARELGVPIVMHDYLTGGFTANTSLAHYCRDNGLLLHIHRAMHAVIDRQKNHGMHFRVLAKALRLSGGDHIHSGTVVGKLEGERDITLGFVDLLRDDFIEKDRSRGIYFTQDWVSLPGVIPVASG-----------------------------GIHVWHMPALTEIFGDDSVLQFGGGTLGHPWGNAPGAVANRVA-----------VEACVKARNEG---RDLAAEGNAIIREACKWSPELAAACEVWKEIKFEFPAMD---",
        )
        self.check_reading_writing(path)

    def test_msaprobs(self):
        """Parse an 8-row MSAPROBS file with a consensus line and round-trip it."""
        path = "Clustalw/msaprobs.aln"
        # This example was obtained from
        # http://virgil.ruc.dk/kurser/Sekvens/Treedraw.htm
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            self.assertEqual(alignments.program, "MSAPROBS")
            self.assertEqual(alignments.version, "0.9.7")
            alignment = next(alignments)
            with self.assertRaises(StopIteration):
                next(alignments)
        self.assertEqual(
            repr(alignment),
            "<Bio.Align.Alignment object (8 rows x 298 columns) at 0x%x>"
            % id(alignment),
        )
        self.assertEqual(len(alignment), 8)
        self.assertEqual(alignment.shape, (8, 298))
        self.assertEqual(alignment.sequences[0].id, "V_Harveyi_PATH")
        self.assertEqual(alignment.sequences[1].id, "B_subtilis_YXEM")
        self.assertEqual(alignment.sequences[2].id, "FLIY_ECOLI")
        self.assertEqual(alignment.sequences[3].id, "Deinococcus_radiodurans")
        self.assertEqual(alignment.sequences[4].id, "B_subtilis_GlnH_homo_YCKK")
        self.assertEqual(alignment.sequences[5].id, "YA80_HAEIN")
        self.assertEqual(alignment.sequences[6].id, "E_coli_GlnH")
        self.assertEqual(alignment.sequences[7].id, "HISJ_E_COLI")
        self.assertEqual(
            alignment.sequences[0].seq,
            "MKNWIKVAVAAIALSAATVQAATEVKVGMSGRYFPFTFVKQDKLQGFEVDMWDEIGKRNDYKIEYVTANFSGLFGLLETGRIDTISNQITMTDARKAKYLFADPYVVDGAQITVRKGNDSIQGVEDLAGKTVAVNLGSNFEQLLRDYDKDGKINIKTYDTGIEHDVALGRADAFIMDRLSALELIKKTGLPLQLAGEPFETIQNAWPFVDNEKGRKLQAEVNKALAEMRADGTVEKISVKWFGADITK",
        )
        self.assertEqual(
            alignment.sequences[1].seq,
            "MKMKKWTVLVVAALLAVLSACGNGNSSSKEDDNVLHVGATGQSYPFAYKENGKLTGFDVEVMEAVAKKIDMKLDWKLLEFSGLMGELQTGKLDTISNQVAVTDERKETYNFTKPYAYAGTQIVVKKDNTDIKSVDDLKGKTVAAVLGSNHAKNLESKDPDKKINIKTYETQEGTLKDVAYGRVDAYVNSRTVLIAQIKKTGLPLKLAGDPIVYEQVAFPFAKDDAHDKLRKKVNKALDELRKDGTLKKLSEKYFNEDITVEQKH",
        )
        self.assertEqual(
            alignment.sequences[2].seq,
            "MKLAHLGRQALMGVMAVALVAGMSVKSFADEGLLNKVKERGTLLVGLEGTYPPFSFQGDDGKLTGFEVEFAQQLAKHLGVEASLKPTKWDGMLASLDSKRIDVVINQVTISDERKKKYDFSTPYTISGIQALVKKGNEGTIKTADDLKGKKVGVGLGTNYEEWLRQNVQGVDVRTYDDDPTKYQDLRVGRIDAILVDRLAALDLVKKTNDTLAVTGEAFSRQESGVALRKGNEDLLKAVNDAIAEMQKDGTLQALSEKWFGADVTK",
        )
        self.assertEqual(
            alignment.sequences[3].seq,
            "MKKSLLSLKLSGLLVPSVLALSLSACSSPSSTLNQGTLKIAMEGTYPPFTSKNEQGELVGFDVDIAKAVAQKLNLKPEFVLTEWSGILAGLQANKYDVIVNQVGITPERQNSIGFSQPYAYSRPEIIVAKNNTFNPQSLADLKGKRVGSTLGSNYEKQLIDTGDIKIVTYPGAPEILADLVAGRIDAAYNDRLVVNYIINDQKLPVRGAGQIGDAAPVGIALKKGNSALKDQIDKALTEMRSDGTFEKISQKWFGQDVGQP",
        )
        self.assertEqual(
            alignment.sequences[4].seq,
            "MKKALLALFMVVSIAALAACGAGNDNQSKDNAKDGDLWASIKKKGVLTVGTEGTYEPFTYHDKDTDKLTGYDVEVITEVAKRLGLKVDFKETQWGSMFAGLNSKRFDVVANQVGKTDREDKYDFSDKYTTSRAVVVTKKDNNDIKSEADVKGKTSAQSLTSNYNKLATNAGAKVEGVEGMAQALQMIQQARVDMTYNDKLAVLNYLKTSGNKNVKIAFETGEPQSTYFTFRKGSGEVVDQVNKALKEMKEDGTLSKISKKWFGEDVSK",
        )
        self.assertEqual(
            alignment.sequences[5].seq,
            "MKKLLFTTALLTGAIAFSTFSHAGEIADRVEKTKTLLVGTEGTYAPFTFHDKSGKLTGFDVEVIRKVAEKLGLKVEFKETQWDAMYAGLNAKRFDVIANQTNPSPERLKKYSFTTPYNYSGGVIVTKSSDNSIKSFEDLKGRKSAQSATSNWGKDAKAAGAQILVVDGLAQSLELIKQGRAEATINDKLAVLDYFKQHPNSGLKIAYDRGDKTPTAFAFLQGEDALITKFNQVLEALRQDGTLKQISIEWFGYDITQ",
        )
        self.assertEqual(
            alignment.sequences[6].seq,
            "MKSVLKVSLAALTLAFAVSSHAADKKLVVATDTAFVPFEFKQGDKYVGFDVDLWAAIAKELKLDYELKPMDFSGIIPALQTKNVDLALAGITITDERKKAIDFSDGYYKSGLLVMVKANNNDVKSVKDLDGKVVAVKSGTGSVDYAKANIKTKDLRQFPNIDNAYMELGTNRADAVLHDTPNILYFIKTAGNGQFKAVGDSLEAQQYGIAFPKGSDELRDKVNGALKTLRENGTYNEIYKKWFGTEPK",
        )
        self.assertEqual(
            alignment.sequences[7].seq,
            "MKKLVLSLSLVLAFSSATAAFAAIPQNIRIGTDPTYAPFESKNSQGELVGFDIDLAKELCKRINTQCTFVENPLDALIPSLKAKKIDAIMSSLSITEKRQQEIAFTDKLYAADSRLVVAKNSDIQPTVESLKGKRVGVLQGTTQETFGNEHWAPKGIEIVSYQGQDNIYSDLTAGRIDAAFQDEVAASEGFLKQPVGKDYKFGGPSVKDEKLFGVGTGMGLRKEDNELREALNKAFAEMRADGTYEKLAKKYFDFDVYGG",
        )
        self.assertEqual(
            alignment[0],
            "MKNW--------IKV----AVAAI-A--LSAA-------------------TVQAATEVKVGMSGRYFPFTFVK--QDKLQGFEVDMWDEIGKRNDYKIEYVTANFSGLFGLLETGRIDTISNQITMTDARKAKYLFADPYVVDGAQITVRK-GNDSIQGVEDLAGKTVAVNLGSNFEQLLRDYDKDGKINIKTYDT--GIEHDVALGRADAFIMDRLSALE-LIKKTG-LPLQLAGEPFE-----TIQNAWPFVDNEKGRKLQAEVNKALAEMRADGTVEKISVKWFGADITK----",
        )
        self.assertEqual(
            alignment[1],
            "MKMKKW------TVL----VVAALLA-VLSACGN------------G-NSSSKEDDNVLHVGATGQSYPFAYKE--NGKLTGFDVEVMEAVAKKIDMKLDWKLLEFSGLMGELQTGKLDTISNQVAVTDERKETYNFTKPYAYAGTQIVVKK-DNTDIKSVDDLKGKTVAAVLGSNHAKNLESKDPDKKINIKTYETQEGTLKDVAYGRVDAYVNSRTVLIA-QIKKTG-LPLKLAGDPIV-----YEQVAFPFAKDDAHDKLRKKVNKALDELRKDGTLKKLSEKYFNEDITVEQKH",
        )
        self.assertEqual(
            alignment[2],
            "MKLAHLGRQALMGVM----AVALVAG--MSVKSF---------ADEG-LLNKVKERGTLLVGLEGTYPPFSFQGD-DGKLTGFEVEFAQQLAKHLGVEASLKPTKWDGMLASLDSKRIDVVINQVTISDERKKKYDFSTPYTISGIQALVKKGNEGTIKTADDLKGKKVGVGLGTNYEEWLRQN--VQGVDVRTYDDDPTKYQDLRVGRIDAILVDRLAALD-LVKKTN-DTLAVTGEAFS-----RQESGVALRK--GNEDLLKAVNDAIAEMQKDGTLQALSEKWFGADVTK----",
        )
        self.assertEqual(
            alignment[3],
            "MKKSLL------SLKLSGLLVPSVLALSLSACSS---------------PSSTLNQGTLKIAMEGTYPPFTSKNE-QGELVGFDVDIAKAVAQKLNLKPEFVLTEWSGILAGLQANKYDVIVNQVGITPERQNSIGFSQPYAYSRPEIIVAKNNTFNPQSLADLKGKRVGSTLGSNYEKQLI-D--TGDIKIVTYPGAPEILADLVAGRIDAAYNDRLVVNY-IIND-QKLPVRGAGQIGD-----AAPVGIALKK--GNSALKDQIDKALTEMRSDGTFEKISQKWFGQDVGQ---P",
        )
        self.assertEqual(
            alignment[4],
            "MKKALL------ALF----MVVSIAA--LAACGAGNDNQSKDNAKDGDLWASIKKKGVLTVGTEGTYEPFTYHDKDTDKLTGYDVEVITEVAKRLGLKVDFKETQWGSMFAGLNSKRFDVVANQVGKTD-REDKYDFSDKYTTSRAVVVTKK-DNNDIKSEADVKGKTSAQSLTSNYNKLAT-N--A-GAKVEGVEGMAQALQMIQQARVDMTYNDKLAVLN-YLKTSGNKNVKIAFETGE-----PQSTYFTFRK--GSGEVVDQVNKALKEMKEDGTLSKISKKWFGEDVSK----",
        )
        self.assertEqual(
            alignment[5],
            "MKKLLF------TTA----LLTGAIA--FSTFS-----------HAGEIADRVEKTKTLLVGTEGTYAPFTFHDK-SGKLTGFDVEVIRKVAEKLGLKVEFKETQWDAMYAGLNAKRFDVIANQTNPSPERLKKYSFTTPYNYSGGVIVTKS-SDNSIKSFEDLKGRKSAQSATSNWGKDAK-A--A-GAQILVVDGLAQSLELIKQGRAEATINDKLAVLD-YFKQHPNSGLKIAYDRGD-----KTPTAFAFLQ--GEDALITKFNQVLEALRQDGTLKQISIEWFGYDITQ----",
        )
        self.assertEqual(
            alignment[6],
            "MKSVL-------KVS----LAALTLA--FAVSSH---------A----------ADKKLVVATDTAFVPFEFKQ--GDKYVGFDVDLWAAIAKELKLDYELKPMDFSGIIPALQTKNVDLALAGITITDERKKAIDFSDGYYKSGLLVMVKAN-NNDVKSVKDLDGKVVAVKSGTGSVDYAKAN--IKTKDLRQFPNIDNAYMELGTNRADAVLHDTPNILY-FIKTAGNGQFKAVGDSLE-----AQQYGIAFPK--GSDELRDKVNGALKTLRENGTYNEIYKKWFGTEP-K----",
        )
        self.assertEqual(
            alignment[7],
            "MKKLVL------SLS----LV---LA--FSSATA---------------A-FAAIPQNIRIGTDPTYAPFESKNS-QGELVGFDIDLAKELCKRINTQCTFVENPLDALIPSLKAKKIDAIMSSLSITEKRQQEIAFTDKLYAADSRLVVAK-NSDIQPTVESLKGKRVGVLQGTTQETFGNEHWAPKGIEIVSYQGQDNIYSDLTAGRIDAAFQDEVAASEGFLKQPVGKDYKFGGPSVKDEKLFGVGTGMGLRK--EDNELREALNKAFAEMRADGTYEKLAKKYFDFDVYG---G",
        )
        # The consensus line under the alignment is exposed as a column
        # annotation keyed "clustal_consensus".
        self.assertEqual(
            alignment.column_annotations["clustal_consensus"],
            "**  .        ::    *.     :  :.                     **       .:   *::::. :        : *::::    .        ..  :   *.: . *        :  *      .:   *     :            .    .: *:  :       :.    .. . .:  ::   :  :  . :  .    .:        :.  .: .  :*.   :    :** .  :           ::*.   :     :   .   : ::   ",
        )
        self.check_reading_writing(path)

    def test_muscle(self):
        """Parse a 3-row MUSCLE 3.8 nucleotide alignment and round-trip it."""
        path = "Clustalw/muscle.aln"
        # includes the sequence length on the right hand side of each line
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            self.assertEqual(alignments.program, "MUSCLE")
            self.assertEqual(alignments.version, "3.8")
            alignment = next(alignments)
            with self.assertRaises(StopIteration):
                next(alignments)
        self.assertEqual(
            repr(alignment),
            "<Bio.Align.Alignment object (3 rows x 687 columns) at 0x%x>"
            % id(alignment),
        )
        self.assertEqual(len(alignment), 3)
        self.assertEqual(alignment.sequences[0].id, "Test1seq")
        self.assertEqual(alignment.sequences[1].id, "AT3G20900.1-SEQ")
        self.assertEqual(alignment.sequences[2].id, "AT3G20900.1-CDS")
        self.assertEqual(
            alignment.sequences[0].seq,
            "AGTTACAATAACTGACGAAGCTAAGTAGGCTACTAATTAACGTCATCAACCTAATACATAGCACTTAGAAAAAAGTGAAGTAAGAAAATATAAAATAATAAAAGGGTGGGTTATCAATTGATAGTGTAAATCATCGTATTCCGGTGATATACCCTACCACAAAAACTCAAACCGACTTGATTCAAATCATCTCAATAAATTAGCGCCAAAATAATGAAAAAAATAATAACAAACAAAAACAAACCAAAATAAGAAAAAACATTACGCAAAACATAATAATTTACTCTTCGTTATTGTATTAACAAATCAAAGAGCTGAATTTTGATCACCTGCTAATACTACTTTCTGTATTGATCCTATATCAACGTAAACAAAGATACTAATAATTAACTAAAAGTACGTTCATCGATCGTGTTCGTTGACGAAGAAGAGCTCTATCTCCGGCGGAGCAAAGAAAACGATCTGTCTCCGTCGTAACACACGGTCGCTAGAGAAACTTTGCTTCTTCGGCGCCGGTGGACACGTCAGCATCTCCGGTATCCTAGACTTCTTGGCTTTCGGGGTACAACAACCGCGTGGTGACGTCAGCACCGCTGCTGGGGATGGAGAGGGAACAGAGTT",
        )
        self.assertEqual(
            alignment.sequences[1].seq,
            "ATGAACAAAGTAGCGAGGAAGAACAAAACATCAGGTGAACAAAAAAAAAACTCAATCCACATCAAAGTTACAATAACTGACGAAGCTAAGTAGGCTAGAAATTAAAGTCATCAACCTAATACATAGCACTTAGAAAAAAGTGAAGCAAGAAAATATAAAATAATAAAAGGGTGGGTTATCAATTGATAGTGTAAATCATAGTTGATTTTTGATATACCCTACCACAAAAACTCAAACCGACTTGATTCAAATCATCTCAAAAAACAAGCGCCAAAATAATGAAAAAAATAATAACAAAAACAAACAAACCAAAATAAGAAAAAACATTACGCAAAACATAATAATTTACTCTTCGTTATTGTATTAACAAATCAAAGAGATGAATTTTGATCACCTGCTAATACTACTTTCTGTATTGATCCTATATCAAAAAAAAAAAAGATACTAATAATTAACTAAAAGTACGTTCATCGATCGTGTGCGTTGACGAAGAAGAGCTCTATCTCCGGCGGAGCAAAGAAAACGATCTGTCTCCGTCGTAACACACAGTTTTTCGAGACCCTTTGCTTCTTCGGCGCCGGTGGACACGTCAGCATCTCCGGTATCCTAGACTTCTTGGCTTTCGGGGTACAACAACCGCCTGGTGACGTCAGCACCGCTGCTGGGGATGGAGAGGGAACAGAGTAG",
        )
        self.assertEqual(
            alignment.sequences[2].seq,
            "ATGAACAAAGTAGCGAGGAAGAACAAAACATCAGCAAAGAAAACGATCTGTCTCCGTCGTAACACACAGTTTTTCGAGACCCTTTGCTTCTTCGGCGCCGGTGGACACGTCAGCATCTCCGGTATCCTAGACTTCTTGGCTTTCGGGGTACAACAACCGCCTGGTGACGTCAGCACCGCTGCTGGGGATGGAGAGGGAACAGAGTAG",
        )
        self.assertEqual(
            alignment[0],
            "-----------------------------------------------------------------AGTTACAATAACTGACGAAGCTAAGTAGGCTACTAATTAACGTCATCAACCTAATACATAGCACTTAGAAAAAAGTGAAGTAAGAAAATATAAAATAATAAAAGGGTGGGTTATCAATTGATAGTGTAAATCATCGTATTCCGGTGATATACCCTACCACAAAAACTCAAACCGACTTGATTCAAATCATCTCAATAAATTAGCGCCAAAATAATGAAAAAAATAATAACAAACAAAAACAAACCAAAATAAGAAAAAACATTACGCAAAACATAATAATTTACTCTTCGTTATTGTATTAACAAATCAAAGAGCTGAATTTTGATCACCTGCTAATACTACTTTCTGTATTGATCCTATATCAACGTAAACAAAGATACTAATAATTAACTAAAAGTACGTTCATCGATCGTGTTCGTTGACGAAGAAGAGCTCTATCTCCGGCGGAGCAAAGAAAACGATCTGTCTCCGTCGTAACACACGGTCGCTAGAGAAACTTTGCTTCTTCGGCGCCGGTGGACACGTCAGCATCTCCGGTATCCTAGACTTCTTGGCTTTCGGGGTACAACAACCGCGTGGTGACGTCAGCACCGCTGCTGGGGATGGAGAGGGAACAGAGTT-",
        )
        self.assertEqual(
            alignment[1],
            "ATGAACAAAGTAGCGAGGAAGAACAAAACATCAGGTGAACAAAAAAAAAACTCAATCCACATCAAAGTTACAATAACTGACGAAGCTAAGTAGGCTAGAAATTAAAGTCATCAACCTAATACATAGCACTTAGAAAAAAGTGAAGCAAGAAAATATAAAATAATAAAAGGGTGGGTTATCAATTGATAGTGTAAATCATAGTTGATTTTTGATATACCCTACCACAAAAACTCAAACCGACTTGATTCAAATCATCTCAAAAAACAAGCGCCAAAATAATGAAAAAAATAATAACAAAAACAAACAAACCAAAATAAGAAAAAACATTACGCAAAACATAATAATTTACTCTTCGTTATTGTATTAACAAATCAAAGAGATGAATTTTGATCACCTGCTAATACTACTTTCTGTATTGATCCTATATCAAAAAAAAAAAAGATACTAATAATTAACTAAAAGTACGTTCATCGATCGTGTGCGTTGACGAAGAAGAGCTCTATCTCCGGCGGAGCAAAGAAAACGATCTGTCTCCGTCGTAACACACAGTTTTTCGAGACCCTTTGCTTCTTCGGCGCCGGTGGACACGTCAGCATCTCCGGTATCCTAGACTTCTTGGCTTTCGGGGTACAACAACCGCCTGGTGACGTCAGCACCGCTGCTGGGGATGGAGAGGGAACAGAGTAG",
        )
        self.assertEqual(
            alignment[2],
            "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ATGAACAAAGTAGCGAGGAAGAA------------------------------CAAAACATC----------------------------------------------------------------------------------------------------------------------------------------------------------------------------AGCAAAGAAAACGATCTGTCTCCGTCGTAACACACAGTTTTTCGAGACCCTTTGCTTCTTCGGCGCCGGTGGACACGTCAGCATCTCCGGTATCCTAGACTTCTTGGCTTTCGGGGTACAACAACCGCCTGGTGACGTCAGCACCGCTGCTGGGGATGGAGAGGGAACAGAGTAG",
        )
        self.assertEqual(
            alignment.column_annotations["clustal_consensus"],
            "                                                                  ***** *** ** *   ** *  ********  *********************************** **  *  ****                                                                                                                                               ******************************************************************************* ********************************************  ",
        )
        self.check_reading_writing(path)

    def test_kalign(self):
        """Make sure we can parse the Kalign header."""
        path = "Clustalw/kalign.aln"
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            self.assertEqual(alignments.program, "Kalign")
            self.assertEqual(alignments.version, "2.0")
            alignment = next(alignments)
            with self.assertRaises(StopIteration):
                next(alignments)
        self.assertEqual(
            repr(alignment),
            "<Bio.Align.Alignment object (2 rows x 27 columns) at 0x%x>"
            % id(alignment),
        )
        self.assertEqual(len(alignment), 2)
        self.assertEqual(alignment.sequences[0].id, "Test1seq")
        self.assertEqual(alignment.sequences[1].id, "AT3G20900")
        self.assertEqual(alignment.sequences[0].seq, "GCTGGGGATGGAGAGGGAACAGAGTT")
        self.assertEqual(alignment.sequences[1].seq, "GCTGGGGATGGAGAGGGAACAGAGTAG")
        self.assertEqual(alignment[0], "GCTGGGGATGGAGAGGGAACAGAGT-T")
        self.assertEqual(alignment[1], "GCTGGGGATGGAGAGGGAACAGAGTAG")
        self.check_reading_writing(path)

    def test_probcons(self):
        """Parse a 5-row PROBCONS 1.12 alignment and round-trip it."""
        path = "Clustalw/probcons.aln"
        # example taken from the PROBCONS documentation
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            self.assertEqual(alignments.program, "PROBCONS")
            self.assertEqual(alignments.version, "1.12")
            alignment = next(alignments)
            with self.assertRaises(StopIteration):
                next(alignments)
        self.assertEqual(
            repr(alignment),
            "<Bio.Align.Alignment object (5 rows x 101 columns) at 0x%x>"
            % id(alignment),
        )
        self.assertEqual(len(alignment), 5)
        self.assertEqual(alignment.sequences[0].id, "plas_horvu")
        self.assertEqual(alignment.sequences[1].id, "plas_chlre")
        self.assertEqual(alignment.sequences[2].id, "plas_anava")
        self.assertEqual(alignment.sequences[3].id, "plas_proho")
        self.assertEqual(alignment.sequences[4].id, "azup_achcy")
        self.assertEqual(
            alignment.sequences[0].seq,
            "DVLLGANGGVLVFEPNDFSVKAGETITFKNNAGYPHNVVFDEDAVPSGVDVSKISQEEYLTAPGETFSVTLTVPGTYGFYCEPHAGAGMVGKVTV",
        )
        self.assertEqual(
            alignment.sequences[1].seq,
            "VKLGADSGALEFVPKTLTIKSGETVNFVNNAGFPHNIVFDEDAIPSGVNADAISRDDYLNAPGETYSVKLTAAGEYGYYCEPHQGAGMVGKIIV",
        )
        self.assertEqual(
            alignment.sequences[2].seq,
            "VKLGSDKGLLVFEPAKLTIKPGDTVEFLNNKVPPHNVVFDAALNPAKSADLAKSLSHKQLLMSPGQSTSTTFPADAPAGEYTFYCEPHRGAGMVGKITV",
        )
        self.assertEqual(
            alignment.sequences[3].seq,
            "VQIKMGTDKYAPLYEPKALSISAGDTVEFVMNKVGPHNVIFDKVPAGESAPALSNTKLRIAPGSFYSVTLGTPGTYSFYCTPHRGAGMVGTITV",
        )
        self.assertEqual(
            alignment.sequences[4].seq,
            "VHMLNKGKDGAMVFEPASLKVAPGDTVTFIPTDKGHNVETIKGMIPDGAEAFKSKINENYKVTFTAPGVYGVKCTPHYGMGMVGVVEV",
        )
        self.assertEqual(
            alignment.column_annotations["clustal_consensus"],
            "      ::   : *  . :.: .*:*: * . **: * .    .  :*.  . ..  ...:  .*     * *   ** *  ** *** * **  : *",
        )
        self.assertEqual(
            alignment[0],
            "D-VLLGANGGVLVFEPNDFSVKAGETITFKNNAGYPHNVVFDEDAVPSG-VD-VSKISQEEYLTAPGETFSVTLTV---PGTYGFYCEPHAGAGMVGKVTV",
        )
        self.assertEqual(
            alignment[1],
            "--VKLGADSGALEFVPKTLTIKSGETVNFVNNAGFPHNIVFDEDAIPSG-VN-ADAISRDDYLNAPGETYSVKLTA---AGEYGYYCEPHQGAGMVGKIIV",
        )
        self.assertEqual(
            alignment[2],
            "--VKLGSDKGLLVFEPAKLTIKPGDTVEFLNNKVPPHNVVFDAALNPAKSADLAKSLSHKQLLMSPGQSTSTTFPADAPAGEYTFYCEPHRGAGMVGKITV",
        )
        self.assertEqual(
            alignment[3],
            "VQIKMGTDKYAPLYEPKALSISAGDTVEFVMNKVGPHNVIFDK--VPAG-ES-APALSNTKLRIAPGSFYSVTLGT---PGTYSFYCTPHRGAGMVGTITV",
        )
        self.assertEqual(
            alignment[4],
            "VHMLNKGKDGAMVFEPASLKVAPGDTVTFIPTDK-GHNVETIKGMIPDG-AE-A-------FKSKINENYKVTFTA---PGVYGVKCTPHYGMGMVGVVEV",
        )
        self.check_reading_writing(path)

    def test_empty(self):
        """Checking empty file."""
        stream = StringIO()
        with self.assertRaises(ValueError):
            AlignmentIterator(stream)
if __name__ == "__main__":
    # When executed as a script, run the tests with per-test verbose output.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
sgalpha01/biopython | Tests/test_Align_phylip.py | <reponame>sgalpha01/biopython
# Copyright 2006-2014 by <NAME>. All rights reserved.
# Revisions copyright 2011 <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.Align.phylip module."""
import unittest
import warnings
from io import StringIO
from Bio import BiopythonExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonExperimentalWarning)
from Bio.Align.phylip import AlignmentIterator
from Bio.Align.phylip import AlignmentWriter
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install numpy if you want to use Bio.Align.phylip."
) from None
class TestPhylipReading(unittest.TestCase):
    """Read the sample PHYLIP files and verify records, rows and coordinates."""

    def check_reading_writing(self, path):
        """Write the alignment in *path* back out and verify the round trip."""
        alignments = AlignmentIterator(path)
        stream = StringIO()
        writer = AlignmentWriter(stream)
        count = writer.write_file(alignments, mincount=1, maxcount=1)
        self.assertEqual(count, 1)
        alignment = next(AlignmentIterator(path))
        stream.seek(0)
        saved_alignments = AlignmentIterator(stream)
        saved_alignment = next(saved_alignments)
        with self.assertRaises(StopIteration):
            next(saved_alignments)
        self.assertEqual(len(alignment), len(saved_alignment))
        pairs = zip(alignment.sequences, saved_alignment.sequences)
        for index, (record, saved_record) in enumerate(pairs):
            self.assertEqual(record.id, saved_record.id)
            self.assertEqual(record.seq, saved_record.seq)
            self.assertEqual(alignment[index], saved_alignment[index])
        self.assertTrue(
            numpy.array_equal(alignment.coordinates, saved_alignment.coordinates)
        )

    def test_one(self):
        """Parse Phylip/one.dat (8 protein sequences) and check every record."""
        path = "Phylip/one.dat"
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            alignment = next(alignments)
            with self.assertRaises(StopIteration):
                next(alignments)
        self.assertEqual(
            repr(alignment),
            "<Bio.Align.Alignment object (8 rows x 286 columns) at 0x%x>"
            % id(alignment),
        )
        self.assertEqual(len(alignment), 8)
        expected_ids = [
            "V_Harveyi_",
            "B_subtilis",
            "B_subtilis",
            "YA80_HAEIN",
            "FLIY_ECOLI",
            "E_coli_Gln",
            "Deinococcu",
            "HISJ_E_COL",
        ]
        # Raw (ungapped) sequences, in file order.
        expected_seqs = [
            "MKNWIKVAVAAIALSAATVQAATEVKVGMSGRYFPFTFVKQDKLQGFEVDMWDEIGKRNDYKIEYVTANFSGLFGLLETGRIDTISNQITMTDARKAKYLFADPYVVDGAQITVRKGNDSIQGVEDLAGKTVAVNLGSNFEQLLRDYDKDGKINIKTYDTGIEHDVALGRADAFIMDRLSALELIKKTGLPLQLAGEPFETIQNAWPFVDNEKGRKLQAEVNKALAEMRADGTVEKISVKWFGADITK",
            "MKMKKWTVLVVAALLAVLSACGNGNSSSKEDDNVLHVGATGQSYPFAYKENGKLTGFDVEVMEAVAKKIDMKLDWKLLEFSGLMGELQTGKLDTISNQVAVTDERKETYNFTKPYAYAGTQIVVKKDNTDIKSVDDLKGKTVAAVLGSNHAKNLESKDPDKKINIKTYETQEGTLKDVAYGRVDAYVNSRTVLIAQIKKTGLPLKLAGDPIVYEQVAFPFAKDDAHDKLRKKVNKALDELRKDGTLKKLSEKYFNEDITVEQKH",
            "MKKALLALFMVVSIAALAACGAGNDNQSKDNAKDGDLWASIKKKGVLTVGTEGTYEPFTYHDKDTDKLTGYDVEVITEVAKRLGLKVDFKETQWGSMFAGLNSKRFDVVANQVGKTDREDKYDFSDKYTTSRAVVVTKKDNNDIKSEADVKGKTSAQSLTSNYNKLATNAGAKVEGVEGMAQALQMIQQARVDMTYNDKLAVLNYLKTSGNKNVKIAFETGEPQSTYFTFRKGSGEVVDQVNKALKEMKEDGTLSKISKKWFGEDVSK",
            "MKKLLFTTALLTGAIAFSTFSHAGEIADRVEKTKTLLVGTEGTYAPFTFHDKSGKLTGFDVEVIRKVAEKLGLKVEFKETQWDAMYAGLNAKRFDVIANQTNPSPERLKKYSFTTPYNYSGGVIVTKSSDNSIKSFEDLKGRKSAQSATSNWGKDAKAAGAQILVVDGLAQSLELIKQGRAEATINDKLAVLDYFKQHPNSGLKIAYDRGDKTPTAFAFLQGEDALITKFNQVLEALRQDGTLKQISIEWFGYDITQ",
            "MKLAHLGRQALMGVMAVALVAGMSVKSFADEGLLNKVKERGTLLVGLEGTYPPFSFQGDDGKLTGFEVEFAQQLAKHLGVEASLKPTKWDGMLASLDSKRIDVVINQVTISDERKKKYDFSTPYTISGIQALVKKGNEGTIKTADDLKGKKVGVGLGTNYEEWLRQNVQGVDVRTYDDDPTKYQDLRVGRIDAILVDRLAALDLVKKTNDTLAVTGEAFSRQESGVALRKGNEDLLKAVNDAIAEMQKDGTLQALSEKWFGADVTK",
            "MKSVLKVSLAALTLAFAVSSHAADKKLVVATDTAFVPFEFKQGDKYVGFDVDLWAAIAKELKLDYELKPMDFSGIIPALQTKNVDLALAGITITDERKKAIDFSDGYYKSGLLVMVKANNNDVKSVKDLDGKVVAVKSGTGSVDYAKANIKTKDLRQFPNIDNAYMELGTNRADAVLHDTPNILYFIKTAGNGQFKAVGDSLEAQQYGIAFPKGSDELRDKVNGALKTLRENGTYNEIYKKWFGTEPK",
            "MKKSLLSLKLSGLLVPSVLALSLSACSSPSSTLNQGTLKIAMEGTYPPFTSKNEQGELVGFDVDIAKAVAQKLNLKPEFVLTEWSGILAGLQANKYDVIVNQVGITPERQNSIGFSQPYAYSRPEIIVAKNNTFNPQSLADLKGKRVGSTLGSNYEKQLIDTGDIKIVTYPGAPEILADLVAGRIDAAYNDRLVVNYIINDQKLPVRGAGQIGDAAPVGIALKKGNSALKDQIDKALTEMRSDGTFEKISQKWFGQDVGQP",
            "MKKLVLSLSLVLAFSSATAAFAAIPQNIRIGTDPTYAPFESKNSQGELVGFDIDLAKELCKRINTQCTFVENPLDALIPSLKAKKIDAIMSSLSITEKRQQEIAFTDKLYAADSRLVVAKNSDIQPTVESLKGKRVGVLQGTTQETFGNEHWAPKGIEIVSYQGQDNIYSDLTAGRIDAAFQDEVAASEGFLKQPVGKDYKFGGPSVKDEKLFGVGTGMGLRKEDNELREALNKAFAEMRADGTYEKLAKKYFDFDVYGG",
        ]
        # Aligned (gapped) rows, in file order.
        expected_rows = [
            "--MKNWIKVAVAAIA--LSAA------------------TVQAATEVKVGMSGRYFPFTFVKQ--DKLQGFEVDMWDEIGKRNDYKIEYVTANFSGLFGLLETGRIDTISNQITMTDARKAKYLFADPYVVDG-AQITVRKGNDSIQGVEDLAGKTVAVNLGSNFEQLLRDYDKDGKINIKTYDT--GIEHDVALGRADAFIMDRLSALE-LIKKT-GLPLQLAGEPFETI-----QNAWPFVDNEKGRKLQAEVNKALAEMRADGTVEKISVKWFGADITK----",
            "MKMKKWTVLVVAALLAVLSACG------------NGNSSSKEDDNVLHVGATGQSYPFAYKEN--GKLTGFDVEVMEAVAKKIDMKLDWKLLEFSGLMGELQTGKLDTISNQVAVTDERKETYNFTKPYAYAG-TQIVVKKDNTDIKSVDDLKGKTVAAVLGSNHAKNLESKDPDKKINIKTYETQEGTLKDVAYGRVDAYVNSRTVLIA-QIKKT-GLPLKLAGDPIVYE-----QVAFPFAKDDAHDKLRKKVNKALDELRKDGTLKKLSEKYFNEDITVEQKH",
            "MKKALLALFMVVSIAALAACGAGNDNQSKDNAKDGDLWASIKKKGVLTVGTEGTYEPFTYHDKDTDKLTGYDVEVITEVAKRLGLKVDFKETQWGSMFAGLNSKRFDVVANQVG-KTDREDKYDFSDKYTTSR-AVVVTKKDNNDIKSEADVKGKTSAQSLTSNYNKLATN----AGAKVEGVEGMAQALQMIQQARVDMTYNDKLAVLN-YLKTSGNKNVKIAFETGEPQ-----STYFTFRKGS--GEVVDQVNKALKEMKEDGTLSKISKKWFGEDVSK----",
            "MKKLLFTTALLTGAIAFSTF-----------SHAGEIADRVEKTKTLLVGTEGTYAPFTFHDK-SGKLTGFDVEVIRKVAEKLGLKVEFKETQWDAMYAGLNAKRFDVIANQTNPSPERLKKYSFTTPYNYSG-GVIVTKSSDNSIKSFEDLKGRKSAQSATSNWGKDAKA----AGAQILVVDGLAQSLELIKQGRAEATINDKLAVLD-YFKQHPNSGLKIAYDRGDKT-----PTAFAFLQGE--DALITKFNQVLEALRQDGTLKQISIEWFGYDITQ----",
            "MKLAHLGRQALMGVMAVALVAG---MSVKSFADEG-LLNKVKERGTLLVGLEGTYPPFSFQGD-DGKLTGFEVEFAQQLAKHLGVEASLKPTKWDGMLASLDSKRIDVVINQVTISDERKKKYDFSTPYTISGIQALVKKGNEGTIKTADDLKGKKVGVGLGTNYEEWLRQNV--QGVDVRTYDDDPTKYQDLRVGRIDAILVDRLAALD-LVKKT-NDTLAVTGEAFSRQ-----ESGVALRKGN--EDLLKAVNDAIAEMQKDGTLQALSEKWFGADVTK----",
            "--MKSVLKVSLAALTLAFAVS------------------SHAADKKLVVATDTAFVPFEFKQG--DKYVGFDVDLWAAIAKELKLDYELKPMDFSGIIPALQTKNVDLALAGITITDERKKAIDFSDGYYKSG-LLVMVKANNNDVKSVKDLDGKVVAVKSGTGSVDYAKAN--IKTKDLRQFPNIDNAYMELGTNRADAVLHDTPNILY-FIKTAGNGQFKAVGDSLEAQ-----QYGIAFPKGS--DELRDKVNGALKTLRENGTYNEIYKKWFGTEPK-----",
            "-MKKSLLSLKLSGLLVPSVLALS--------LSACSSPSSTLNQGTLKIAMEGTYPPFTSKNE-QGELVGFDVDIAKAVAQKLNLKPEFVLTEWSGILAGLQANKYDVIVNQVGITPERQNSIGFSQPYAYSRPEIIVAKNNTFNPQSLADLKGKRVGSTLGSNYEKQLIDTG---DIKIVTYPGAPEILADLVAGRIDAAYNDRLVVNY-IINDQ-KLPVRGAGQIGDAA-----PVGIALKKGN--SALKDQIDKALTEMRSDGTFEKISQKWFGQDVGQP---",
            "MKKLVLSLSLVLAFSSATAAF-------------------AAIPQNIRIGTDPTYAPFESKNS-QGELVGFDIDLAKELCKRINTQCTFVENPLDALIPSLKAKKIDAIMSSLSITEKRQQEIAFTDKLYAADSRLVVAKNSDIQP-TVESLKGKRVGVLQGTTQETFGNEHWAPKGIEIVSYQGQDNIYSDLTAGRIDAAFQDEVAASEGFLKQPVGKDYKFGGPSVKDEKLFGVGTGMGLRKED--NELREALNKAFAEMRADGTYEKLAKKYFDFDVYGG---",
        ]
        for record, expected_id in zip(alignment.sequences, expected_ids):
            self.assertEqual(record.id, expected_id)
        for record, expected_seq in zip(alignment.sequences, expected_seqs):
            self.assertEqual(record.seq, expected_seq)
        for index, expected_row in enumerate(expected_rows):
            self.assertEqual(alignment[index], expected_row)
        # One row of column indices per sequence; 45 breakpoints each.
        expected_coordinates = numpy.array(
            [
                [0, 0, 0, 13, 13, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 41, 41, 41, 90, 91, 109, 109, 121, 122, 146, 147, 148, 149, 150, 151, 160, 160, 183, 183, 188, 188, 202, 202, 212, 214, 247, 248, 248, 248],
                [0, 1, 2, 15, 17, 20, 21, 22, 22, 22, 22, 22, 23, 24, 27, 28, 51, 51, 51, 100, 101, 119, 119, 131, 132, 156, 157, 158, 159, 160, 161, 170, 172, 195, 195, 200, 200, 214, 214, 224, 226, 259, 260, 261, 264],
                [0, 1, 2, 15, 17, 20, 21, 22, 23, 25, 31, 34, 35, 36, 39, 40, 63, 64, 65, 114, 114, 132, 132, 144, 145, 169, 169, 169, 169, 169, 170, 179, 181, 204, 204, 209, 210, 224, 224, 234, 234, 267, 268, 268, 268],
                [0, 1, 2, 15, 17, 20, 20, 20, 20, 20, 20, 23, 24, 25, 28, 29, 52, 52, 53, 102, 103, 121, 121, 133, 134, 158, 158, 158, 158, 158, 159, 168, 170, 193, 193, 198, 199, 213, 213, 223, 223, 256, 257, 257, 257],
                [0, 1, 2, 15, 17, 20, 21, 22, 22, 22, 28, 31, 32, 32, 35, 36, 59, 59, 60, 109, 110, 128, 129, 141, 142, 166, 167, 168, 168, 168, 169, 178, 180, 203, 203, 208, 208, 222, 222, 232, 232, 265, 266, 266, 266],
                [0, 0, 0, 13, 15, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 20, 43, 43, 43, 92, 93, 111, 111, 123, 124, 148, 149, 149, 149, 150, 151, 160, 162, 185, 185, 190, 191, 205, 205, 215, 215, 248, 248, 248, 248],
                [0, 0, 1, 14, 16, 19, 20, 21, 22, 22, 22, 25, 26, 27, 30, 31, 54, 54, 55, 104, 105, 123, 124, 136, 137, 161, 162, 163, 163, 163, 163, 172, 174, 197, 197, 202, 202, 216, 216, 226, 226, 259, 260, 261, 261],
                [0, 1, 2, 15, 17, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 44, 44, 45, 94, 95, 113, 114, 126, 126, 150, 151, 152, 153, 154, 155, 164, 166, 189, 190, 195, 196, 210, 215, 225, 225, 258, 259, 260, 260],
            ]
        )
        self.assertTrue(
            numpy.array_equal(alignment.coordinates, expected_coordinates)
        )
        self.check_reading_writing(path)

    def test_two_and_three(self):
        """two.dat and three.dat encode the same 5 x 60 nucleotide alignment."""
        # derived from http://atgc.lirmm.fr/phyml/usersguide.html
        expected_seqs = [
            "CCATCTCACGGTCGGTACGATACACCTGCTTTTGGCAGGAAATGGTCAATATTACAAGGT",
            "CCATCTCACGGTCAGTAAGATACACCTGCTTTTGGCGGGAAATGGTCAACATTAAAAGAT",
            "CCATCTCCCGCTCAGTAAGATACCCCTGCTGTTGGCGGGAAATCGTCAATATTAAAAGGT",
            "TCATCTCATGGTCAATAAGATACTCCTGCTTTTGGCGGGAAATGGTCAATCTTAAAAGGT",
            "CCATCTCACGGTCGGTAAGATACACCTGCTTTTGGCGGGAAATGGTCAATATTAAAAGGT",
        ]
        for path in ("Phylip/two.dat", "Phylip/three.dat"):
            with open(path) as stream:
                alignments = AlignmentIterator(stream)
                alignment = next(alignments)
                with self.assertRaises(StopIteration):
                    next(alignments)
            self.assertEqual(
                repr(alignment),
                "<Bio.Align.Alignment object (5 rows x 60 columns) at 0x%x>"
                % id(alignment),
            )
            self.assertEqual(len(alignment), 5)
            for index, record in enumerate(alignment.sequences):
                self.assertEqual(record.id, "Tax%d" % (index + 1))
            for record, expected_seq in zip(alignment.sequences, expected_seqs):
                self.assertEqual(record.seq, expected_seq)
            # Ungapped input: each aligned row equals its raw sequence.
            for index, expected_seq in enumerate(expected_seqs):
                self.assertEqual(alignment[index], expected_seq)
            self.check_reading_writing(path)
            self.assertTrue(
                numpy.array_equal(
                    alignment.coordinates, numpy.array([[0, 60]] * 5)
                )
            )

    def test_four(self):
        """Parse four.dat, where some names run into their sequences."""
        path = "Phylip/four.dat"
        # File derived from here:
        # http://evolution.genetics.washington.edu/phylip/doc/sequence.html
        # Note the lack of any white space between names 2 and 3 and their seqs.
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            alignment = next(alignments)
            with self.assertRaises(StopIteration):
                next(alignments)
        self.assertEqual(
            repr(alignment),
            "<Bio.Align.Alignment object (5 rows x 42 columns) at 0x%x>"
            % id(alignment),
        )
        self.assertEqual(len(alignment), 5)
        expected_ids = ["Turkey", "Salmo gair", "<NAME>", "Chimp", "Gorilla"]
        expected_seqs = [
            "AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT",
            "AAGCCTTGGCAGTGCAGGGTGAGCCGTGGCCGGGCACGGTAT",
            "ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA",
            "AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT",
            "AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA",
        ]
        for record, expected_id in zip(alignment.sequences, expected_ids):
            self.assertEqual(record.id, expected_id)
        for record, expected_seq in zip(alignment.sequences, expected_seqs):
            self.assertEqual(record.seq, expected_seq)
        # Ungapped input: each aligned row equals its raw sequence.
        for index, expected_seq in enumerate(expected_seqs):
            self.assertEqual(alignment[index], expected_seq)
        self.check_reading_writing(path)
        self.assertTrue(
            numpy.array_equal(
                alignment.coordinates, numpy.array([[0, 42]] * 5)
            )
        )

    def test_five_and_six(self):
        """five.dat and six.dat hold the same alignment in two layouts."""
        # http://evolution.genetics.washington.edu/phylip/doc/sequence.html
        expected_ids = ["Turkey", "Salmo gair", "H. Sapiens", "Chimp", "Gorilla"]
        expected_seqs = [
            "AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT",
            "AAGCCTTGGCAGTGCAGGGTGAGCCGTGGCCGGGCACGGTAT",
            "ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA",
            "AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT",
            "AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA",
        ]
        for path in ("Phylip/five.dat", "Phylip/six.dat"):
            with open(path) as stream:
                alignments = AlignmentIterator(stream)
                alignment = next(alignments)
                with self.assertRaises(StopIteration):
                    next(alignments)
            self.assertEqual(
                repr(alignment),
                "<Bio.Align.Alignment object (5 rows x 42 columns) at 0x%x>"
                % id(alignment),
            )
            self.assertEqual(len(alignment), 5)
            for record, expected_id in zip(alignment.sequences, expected_ids):
                self.assertEqual(record.id, expected_id)
            for record, expected_seq in zip(alignment.sequences, expected_seqs):
                self.assertEqual(record.seq, expected_seq)
            # Ungapped input: each aligned row equals its raw sequence.
            for index, expected_seq in enumerate(expected_seqs):
                self.assertEqual(alignment[index], expected_seq)
            self.assertTrue(
                numpy.array_equal(
                    alignment.coordinates, numpy.array([[0, 42]] * 5)
                )
            )
            self.check_reading_writing(path)
if __name__ == "__main__":
    # Run the suite verbosely when this test file is executed directly.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
|
sgalpha01/biopython | Tests/test_Align_mauve.py | # Copyright 2006-2014 by <NAME>. All rights reserved.
# Revisions copyright 2011 <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.Align.mauve module."""
import os
import unittest
import warnings
from io import StringIO
from Bio.Seq import Seq, MutableSeq
from Bio import SeqIO
from Bio import BiopythonExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonExperimentalWarning)
from Bio.Align.mauve import AlignmentIterator, AlignmentWriter
try:
import numpy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install numpy if you want to use Bio.Align.mauve."
) from None
class TestCombinedFile(unittest.TestCase):
    """Parse Mauve output generated from a single combined FASTA file."""

    def setUp(self):
        """Index the combined FASTA records by their position ("0", "1", ...)."""
        path = os.path.join("Mauve", "combined.fa")
        records = SeqIO.parse(path, "fasta")
        self.sequences = {str(i): record.seq for i, record in enumerate(records)}

    def _check_single(
        self, alignment, expected_id, expected_seq, expected_repr, expected_text, span
    ):
        """Verify a one-record alignment block: id, sequence, row text, coordinates."""
        self.assertEqual(len(alignment), 1)
        self.assertEqual(len(alignment.sequences), 1)
        record = alignment.sequences[0]
        self.assertEqual(record.id, expected_id)
        if expected_seq is not None:
            self.assertEqual(record.seq, expected_seq)
        if expected_repr is not None:
            self.assertEqual(repr(record.seq), expected_repr)
        start, end = span
        full = self.sequences[record.id]
        self.assertEqual(record.seq[start:end], full[start:end])
        self.assertEqual(alignment[0], expected_text)
        self.assertTrue(
            numpy.array_equal(alignment.coordinates, numpy.array([[start, end]]))
        )

    def test_parse(self):
        """Walk through every alignment block of combined.xmfa."""
        path = os.path.join("Mauve", "combined.xmfa")
        collected = []
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            self.assertEqual(len(alignments.metadata), 3)
            self.assertEqual(alignments.metadata["FormatVersion"], "Mauve1")
            self.assertEqual(alignments.metadata["File"], "combined.fa")
            self.assertEqual(alignments.metadata["BackboneFile"], "combined.xmfa.bbcols")
            # First block: three records, one of them (record "1") empty.
            alignment = next(alignments)
            collected.append(alignment)
            self.assertEqual(len(alignment), 3)
            self.assertEqual(len(alignment.sequences), 3)
            first, second, third = alignment.sequences
            self.assertEqual(first.id, "0")
            self.assertEqual(
                repr(first.seq),
                "Seq({1: 'AAAAGGAAAGTACGGCCCGGCCACTCCGGGTGTGTGCTAGGAGGGCTT'}, length=49)",
            )
            # Record "0" aligns on the reverse strand: its coordinates count
            # down, so translate them back to forward positions.
            full = self.sequences[first.id]
            start = len(full) - alignment.coordinates[0, 0]
            end = len(full) - alignment.coordinates[0, -1]
            self.assertEqual(start, 1)
            self.assertEqual(end, 49)
            self.assertEqual(first.seq[start:end], full[start:end])
            self.assertEqual(second.id, "1")
            self.assertEqual(second.seq, "")
            self.assertEqual(alignment.coordinates[1, 0], 0)
            self.assertEqual(alignment.coordinates[1, -1], 0)
            self.assertEqual(third.id, "2")
            self.assertEqual(
                repr(third.seq),
                "Seq({1: 'AAGCCCTGCGCGCTCAGCCGGAGTGTCCCGGGCCCTGCTTTCCTTTT'}, length=48)",
            )
            start = alignment.coordinates[2, 0]
            end = alignment.coordinates[2, -1]
            self.assertEqual(start, 1)
            self.assertEqual(end, 48)
            self.assertEqual(third.seq[start:end], self.sequences[third.id][start:end])
            self.assertEqual(
                alignment[0], "AAGCCCTCCTAGCACACACCCGGAGTGG-CCGGGCCGTACTTTCCTTTT"
            )
            self.assertEqual(alignment[1], "-" * 49)
            self.assertEqual(
                alignment[2], "AAGCCCTGC--GCGCTCAGCCGGAGTGTCCCGGGCCCTGCTTTCCTTTT"
            )
            self.assertTrue(
                numpy.array_equal(
                    alignment.coordinates,
                    numpy.array(
                        [
                            [49, 40, 38, 21, 21, 1],
                            [0, 0, 0, 0, 0, 0],
                            [1, 10, 10, 27, 28, 48],
                        ]
                    ),
                )
            )
            # The remaining five blocks each hold a single record.
            singles = (
                ("0", "G", None, "G", (0, 1)),
                ("0", None, "Seq({49: 'A'}, length=50)", "A", (49, 50)),
                (
                    "1",
                    "GAAGAGGAAAAGTAGATCCCTGGCGTCCGGAGCTGGGACGT",
                    None,
                    "GAAGAGGAAAAGTAGATCCCTGGCGTCCGGAGCTGGGACGT",
                    (0, 41),
                ),
                ("2", "C", None, "C", (0, 1)),
                ("2", None, "Seq({48: 'C'}, length=49)", "C", (48, 49)),
            )
            for expected_id, expected_seq, expected_repr, expected_text, span in singles:
                alignment = next(alignments)
                collected.append(alignment)
                self._check_single(
                    alignment, expected_id, expected_seq, expected_repr, expected_text, span
                )
            self.assertRaises(StopIteration, next, alignments)
        # Every nucleotide occurs exactly once in an XMFA file, so the full
        # input sequences can be rebuilt from the blocks.
        self.assertEqual(len(collected), 6)
        indices = {
            int(record.id) for alignment in collected for record in alignment.sequences
        }
        count = max(indices) + 1
        self.assertEqual(count, 3)
        lengths = [0] * count
        for alignment in collected:
            for record in alignment.sequences:
                index = int(record.id)
                lengths[index] = max(lengths[index], len(record.seq))
        self.assertEqual(lengths[0], 50)
        self.assertEqual(lengths[1], 41)
        self.assertEqual(lengths[2], 49)
        rebuilt = [MutableSeq("N" * length) for length in lengths]
        for alignment in collected:
            for row, record in zip(alignment.coordinates, alignment.sequences):
                start, end = row[0], row[-1]
                if start > end:
                    start, end = end, start
                rebuilt[int(record.id)][start:end] = record.seq[start:end]
        # The rebuilt sequences must agree with the original FASTA records.
        for index, sequence in enumerate(rebuilt):
            rebuilt[index] = Seq(sequence)
            self.assertEqual(rebuilt[index], self.sequences[str(index)])
        # Substituting the fully defined sequences back into the alignments
        # must not change any aligned row.
        expected_rows = (
            (
                "AAGCCCTCCTAGCACACACCCGGAGTGG-CCGGGCCGTACTTTCCTTTT",
                "-" * 49,
                "AAGCCCTGC--GCGCTCAGCCGGAGTGTCCCGGGCCCTGCTTTCCTTTT",
            ),
            ("G",),
            ("A",),
            ("GAAGAGGAAAAGTAGATCCCTGGCGTCCGGAGCTGGGACGT",),
            ("C",),
            ("C",),
        )
        for alignment, rows in zip(collected, expected_rows):
            for record in alignment.sequences:
                record.seq = rebuilt[int(record.id)]
            for index, row in enumerate(rows):
                self.assertEqual(alignment[index], row)

    def test_write_read(self):
        """Round-trip: writing the parsed alignments reproduces the input text."""
        path = os.path.join("Mauve", "combined.xmfa")
        with open(path) as stream:
            data = stream.read()
        alignments = AlignmentIterator(StringIO(data))
        output = StringIO()
        written = AlignmentWriter(output).write_file(alignments)
        self.assertEqual(written, 6)
        output.seek(0)
        self.assertEqual(output.read(), data)
class TestSeparateFiles(unittest.TestCase):
    """Parse Mauve output generated from three separate FASTA input files."""

    def setUp(self):
        """Load the three per-species FASTA files, keyed by file name."""
        self.sequences = {}
        for species in ("equCab1", "canFam2", "mm9"):
            filename = f"{species}.fa"
            record = SeqIO.read(os.path.join("Mauve", filename), "fasta")
            self.sequences[filename] = record.seq

    def _check_single(
        self, alignment, expected_id, expected_seq, expected_repr, expected_text, span
    ):
        """Verify a one-record alignment block: id, sequence, row text, coordinates."""
        self.assertEqual(len(alignment), 1)
        self.assertEqual(len(alignment.sequences), 1)
        record = alignment.sequences[0]
        self.assertEqual(record.id, expected_id)
        if expected_seq is not None:
            self.assertEqual(record.seq, expected_seq)
        if expected_repr is not None:
            self.assertEqual(repr(record.seq), expected_repr)
        start, end = span
        full = self.sequences[record.id]
        self.assertEqual(record.seq[start:end], full[start:end])
        self.assertEqual(alignment[0], expected_text)
        self.assertTrue(
            numpy.array_equal(alignment.coordinates, numpy.array([[start, end]]))
        )

    def test_parse(self):
        """Walk through every alignment block of separate.xmfa."""
        path = os.path.join("Mauve", "separate.xmfa")
        collected = []
        with open(path) as stream:
            alignments = AlignmentIterator(stream)
            self.assertEqual(len(alignments.metadata), 2)
            self.assertEqual(alignments.metadata["FormatVersion"], "Mauve1")
            self.assertEqual(alignments.metadata["BackboneFile"], "separate.xmfa.bbcols")
            # First block: three records, the horse one empty.
            alignment = next(alignments)
            collected.append(alignment)
            self.assertEqual(len(alignment), 3)
            self.assertEqual(len(alignment.sequences), 3)
            horse, dog, mouse = alignment.sequences
            self.assertEqual(horse.id, "equCab1.fa")
            self.assertEqual(horse.seq, "")
            self.assertEqual(alignment.coordinates[0, 0], 0)
            self.assertEqual(alignment.coordinates[0, -1], 0)
            self.assertEqual(dog.id, "canFam2.fa")
            self.assertEqual(
                repr(dog.seq), "Seq({25: 'GTCCCGGGCCCTGCTTTCCTTTTC'}, length=49)"
            )
            start = alignment.coordinates[1, 0]
            end = alignment.coordinates[1, -1]
            self.assertEqual(start, 25)
            self.assertEqual(end, 49)
            self.assertEqual(dog.seq[start:end], self.sequences[dog.id][start:end])
            self.assertEqual(mouse.id, "mm9.fa")
            # The mouse record aligns on the reverse strand: its coordinates
            # count down, so translate them back to forward positions.
            start = len(mouse.seq) - alignment.coordinates[2, 0]
            end = len(mouse.seq) - alignment.coordinates[2, -1]
            self.assertEqual(start, 0)
            self.assertEqual(end, 24)
            self.assertEqual(mouse.seq[start:end], self.sequences[mouse.id][start:end])
            self.assertEqual(alignment[0], "-" * 24)
            self.assertEqual(alignment[1], "GTCCCGGGCCCTGCTTTCCTTTTC")
            self.assertEqual(alignment[2], "GCCAGGGATCTACTTTTCCTCTTC")
            self.assertTrue(
                numpy.array_equal(
                    alignment.coordinates,
                    numpy.array([[0, 0], [25, 49], [24, 0]]),
                )
            )
            # The remaining three blocks each hold a single record.
            singles = (
                (
                    "equCab1.fa",
                    "GAAAAGGAAAGTACGGCCCGGCCACTCCGGGTGTGTGCTAGGAGGGCTTA",
                    None,
                    "GAAAAGGAAAGTACGGCCCGGCCACTCCGGGTGTGTGCTAGGAGGGCTTA",
                    (0, 50),
                ),
                (
                    "canFam2.fa",
                    "CAAGCCCTGCGCGCTCAGCCGGAGT",
                    None,
                    "CAAGCCCTGCGCGCTCAGCCGGAGT",
                    (0, 25),
                ),
                ("mm9.fa", None, None, "GTCCGGAGCTGGGACGT", (24, 41)),
            )
            for expected_id, expected_seq, expected_repr, expected_text, span in singles:
                alignment = next(alignments)
                collected.append(alignment)
                self._check_single(
                    alignment, expected_id, expected_seq, expected_repr, expected_text, span
                )
            self.assertRaises(StopIteration, next, alignments)
        # Every nucleotide occurs exactly once in an XMFA file, so the full
        # input sequences can be rebuilt from the blocks.
        self.assertEqual(len(collected), 4)
        filenames = {
            record.id for alignment in collected for record in alignment.sequences
        }
        self.assertEqual(len(filenames), 3)
        lengths = {filename: 0 for filename in filenames}
        for alignment in collected:
            for record in alignment.sequences:
                lengths[record.id] = max(lengths[record.id], len(record.seq))
        self.assertEqual(lengths["equCab1.fa"], 50)
        self.assertEqual(lengths["canFam2.fa"], 49)
        self.assertEqual(lengths["mm9.fa"], 41)
        rebuilt = {
            filename: MutableSeq("N" * length) for filename, length in lengths.items()
        }
        for alignment in collected:
            for row, record in zip(alignment.coordinates, alignment.sequences):
                start, end = row[0], row[-1]
                if start > end:
                    start, end = end, start
                rebuilt[record.id][start:end] = record.seq[start:end]
        # The rebuilt sequences must agree with the original FASTA records.
        for filename, sequence in rebuilt.items():
            rebuilt[filename] = Seq(sequence)
            self.assertEqual(rebuilt[filename], self.sequences[filename])
        # Substituting the fully defined sequences back into the alignments
        # must not change any aligned row.
        expected_rows = (
            ("-" * 24, "GTCCCGGGCCCTGCTTTCCTTTTC", "GCCAGGGATCTACTTTTCCTCTTC"),
            ("GAAAAGGAAAGTACGGCCCGGCCACTCCGGGTGTGTGCTAGGAGGGCTTA",),
            ("CAAGCCCTGCGCGCTCAGCCGGAGT",),
            ("GTCCGGAGCTGGGACGT",),
        )
        for alignment, rows in zip(collected, expected_rows):
            for record in alignment.sequences:
                record.seq = rebuilt[record.id]
            for index, row in enumerate(rows):
                self.assertEqual(alignment[index], row)

    def test_write_read(self):
        """Round-trip: writing the parsed alignments reproduces the input text."""
        path = os.path.join("Mauve", "separate.xmfa")
        with open(path) as stream:
            data = stream.read()
        alignments = AlignmentIterator(StringIO(data))
        output = StringIO()
        written = AlignmentWriter(output).write_file(alignments)
        self.assertEqual(written, 4)
        output.seek(0)
        self.assertEqual(output.read(), data)
class TestMauveBasic(unittest.TestCase):
    """Edge-case behaviour that does not need any data files."""

    def test_empty(self):
        """An empty stream is rejected with the message "Empty file."."""
        with self.assertRaisesRegex(ValueError, "Empty file."):
            AlignmentIterator(StringIO())
if __name__ == "__main__":
    # Run the suite verbosely when this test file is executed directly.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
|
Unigmos/FamousWord | FamousWord.py | from selenium import webdriver
import time
import csv
# Input and output file locations.
input_path = 'input/input.txt'
output_path = 'output/output.txt'
csv_output_path = 'output/output.csv'

# Location of the ChromeDriver executable used by selenium.
chrome_driver_path = 'chromedriver_win32/chromedriver.exe'

# Read the search terms, one per line.  The outputs are written as UTF-8,
# so read the input as UTF-8 too instead of the platform default
# (assumes input.txt is UTF-8 encoded — TODO confirm).
with open(input_path, "r", encoding="utf-8") as file:
    datas = file.readlines()

# Maps each search term to its Google hit count.
dictionary = {}
for data in datas:
    data = data.replace("\n", "")
    # A fresh browser instance per query keeps searches independent.
    driver = webdriver.Chrome(chrome_driver_path)
    driver.get('https://www.google.com/')
    # Type the term into the search box and submit the query.
    search_box = driver.find_element_by_name("q")
    search_box.send_keys(data)
    search_box.submit()
    # "result-stats" holds text like "約 1,230,000 件 (0.45 秒)".
    result_box = driver.find_elements_by_id("result-stats")
    numbers = None
    for searched_number in result_box:
        # Strip the prefix/suffix and thousands separators to get an int.
        numbers = int(
            searched_number.text.removeprefix('約 ').partition(' 件')[0].replace(',', '')
        )
    # Record the count only when Google actually reported one; the original
    # code reused the value from the previous iteration here (and raised
    # NameError if the very first search had no result-stats element).
    if numbers is not None:
        dictionary.setdefault(data, numbers)
    time.sleep(2)
    # Close the active browser window before the next query.
    driver.close()

# Sort by hit count, most popular first.
sort_dictionary = sorted(dictionary.items(), key=lambda i: i[1], reverse=True)

# Plain-text report, one "term:count" per line, sorted by count.
with open(output_path, "w", encoding="utf-8") as write_file:
    for file_writer in sort_dictionary:
        write_file.write(file_writer[0] + ":" + str(file_writer[1]) + "\n")

# CSV report; newline="" prevents the text layer from translating the csv
# module's line endings a second time on Windows.
with open(csv_output_path, "w", encoding="utf-8", newline="") as csv_write_file:
    csv_writer = csv.writer(csv_write_file, lineterminator='\n')
    csv_writer.writerows(sort_dictionary)
AzusaChino/iris-python | notifiers/wechat.py | <gh_stars>0
import notifiers
class Wechat(notifiers.Notifier):
    """WeChat notification channel (not yet implemented).

    The original declared ``class Wechat(notifiers)``, i.e. it tried to
    subclass the ``notifiers`` *module*, which raises TypeError at class
    creation time.  The sibling channels (Message, Email) subclass the
    ``Notifier`` base class, so do the same here via the existing
    ``import notifiers``.
    """

    def __init__(self) -> None:
        # TODO: initialise WeChat API credentials/session here.
        pass
AzusaChino/iris-python | providers/__init__.py | class Provider(object):
base_url = None
    def __init__(self, url: str) -> None:
        """Remember the provider's base URL for later requests."""
        self.base_url = url
    def watch(self):
        """Poll this provider for product availability; subclasses override."""
        pass
AzusaChino/iris-python | providers/taobao.py | from providers import Provider
class Taobao(Provider):
    """Product watcher for taobao.com."""

    def __init__(self, url: str = "https://www.taobao.com") -> None:
        # Provider.__init__ requires a base URL, but the original called
        # super().__init__() with no arguments, so Taobao() raised
        # TypeError.  A defaulted parameter keeps Taobao() constructible
        # while letting callers supply a specific page URL.
        # NOTE(review): default URL is a guess — confirm the intended page.
        super().__init__(url)

    def watch(self):
        """Check the Taobao page; currently just defers to the base class."""
        super().watch()
AzusaChino/iris-python | notifiers/message.py | from notifiers import Notifier
class Message(Notifier):
    """Text-message notification channel (stub, not yet implemented)."""
    def __init__(self) -> None:
        pass
AzusaChino/iris-python | providers/jd.py | <filename>providers/jd.py
from providers import Provider
class Jd(Provider):
    """Product watcher for jd.com (stub)."""
    def __init__(self) -> None:
        # NOTE(review): unlike Taobao, this skips Provider.__init__, so
        # base_url stays at the class-level default None — confirm intended.
        pass
AzusaChino/iris-python | app.py | import toml
class IrisWatcher:
    """Watch product pages listed in app.toml and report availability."""

    # Path of the TOML configuration file, relative to the working directory.
    config_file = "./app.toml"

    def __init__(self):
        parsed_config = toml.load(self.config_file)
        config = parsed_config["config"]
        self.frequency = config["frequency"]
        # watch() reads self.websites, but the original never assigned it
        # and crashed with AttributeError.  Default to an empty list when
        # the key is absent; assumes the schema is
        # [config] websites = [...] — TODO confirm against app.toml.
        self.websites = config.get("websites", [])

    def watch(self, product_name: str):
        """Print the configured websites and polling frequency.

        ``product_name`` is accepted for future use but not yet consulted.
        """
        print(self.websites)
        print(self.frequency)
# Script entry point: build a watcher from app.toml and run one check.
if __name__ == '__main__':
    iw = IrisWatcher()
    iw.watch("Hello")
|
AzusaChino/iris-python | notifiers/email.py | <reponame>AzusaChino/iris-python<gh_stars>0
from notifiers import Notifier
class Email(Notifier):
    """Email notification channel (stub)."""

    def __init__(self) -> None:
        pass
PacktPublishing/Real-time-Django | chapter-3/Rendering HTML/app/simple_app/views.py | from django.shortcuts import render
def index(request):
    """Render the landing page."""
    return render(request, 'index.html', {})


def bingo(request):
    """Render the bingo demo page."""
    return render(request, 'bingo.html', {})


def bmi(request):
    """Render the BMI calculator page."""
    return render(request, 'bmi.html', {})
PacktPublishing/Real-time-Django | chapter-3/Sending JSON/app/simple_app/views.py | <filename>chapter-3/Sending JSON/app/simple_app/views.py<gh_stars>0
from django.shortcuts import render
def index(request):
return render(request, 'index.html', {})
def bingo(request):
return render(request, 'bingo.html', {}) |
PacktPublishing/Real-time-Django | chapter-4/social-network_step_6/app/website/models.py | from django.db import models
class Message(models.Model):
    """A short user-authored message on the social network."""

    author = models.CharField(max_length=100)  # free-text author name, not a User FK
    text = models.TextField(max_length=200)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert

    class Meta:
        db_table = "messages"
        verbose_name_plural = "Messages"

    def __str__(self):
        # Truncated preview, e.g. for the admin list view.
        return self.text[:10] + "..."
|
PacktPublishing/Real-time-Django | chapter-6/app/app_template/backends.py | from django.contrib.auth.models import User
from django.contrib.auth.backends import BaseBackend
class EmailBackend(BaseBackend):
    """
    Email authentication backend.

    Lets users authenticate with either their email address or their
    username; anything containing "@" is treated as an email.
    """

    def authenticate(self, request, username=None, password=None, **kwargs):
        """
        Authenticate a user based on email address as the user name.

        Returns the User on success, None on unknown user or bad password.
        (Fix: the source contained a redacted "password=<PASSWORD>" token,
        which is a syntax error; the conventional default is None.)
        """
        # Guard: Django may probe backends without credentials; "@" in None
        # would raise TypeError.
        if username is None or password is None:
            return None
        if "@" in username:
            kwargs = {"email": username}
        else:
            kwargs = {"username": username}
        try:
            user = User.objects.get(**kwargs)
            if user.check_password(password):
                return user
        except User.DoesNotExist:
            return None

    def get_user(self, user_id):
        """Return the User for a session-stored pk, or None if gone."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
# Future re-implementation with token for auto login
class TokenBackend(BaseBackend):
    # Placeholder: returns None implicitly, so authentication falls through
    # to the other configured backends.
    def authenticate(self, request, token=None):
        pass
|
PacktPublishing/Real-time-Django | chapter-7/app/website/views.py | <filename>chapter-7/app/website/views.py
from django.shortcuts import render
from .forms import CommentForm
from django.contrib.auth.decorators import login_required
def all_posts(request):
    """Render the post-list page inside the base layout."""
    return render(
        request,
        "base.html",
        {
            "page": "pages/list_posts.html",
            "active_nav": "all_posts",
        },
    )
def single(request, slug=None):
    """Render a single-post page with an empty comment form.

    Fix: the URL pattern ("article/<slug:slug>/") passes a `slug` kwarg,
    but the original signature omitted it, so every request raised
    TypeError.  The slug is accepted (default None for direct calls);
    post selection itself is done client-side over WebSockets.
    """
    return render(
        request,
        "base.html",
        {"page": "pages/single.html", "form": CommentForm()},
    )
def about(request):
    """Render the about page."""
    return render(
        request,
        "base.html",
        {"page": "pages/about.html", "active_nav": "about"},
    )


def page_not_found(request, exception):
    """Custom 404 handler; renders pages/404.html inside the base layout."""
    return render(request, "base.html", {"page": "pages/404.html"})
|
PacktPublishing/Real-time-Django | chapter-3/Rendering HTML/app/simple_app/consumers.py | <filename>chapter-3/Rendering HTML/app/simple_app/consumers.py
# app/simple_app/consumers.py
from channels.generic.websocket import WebsocketConsumer
from datetime import datetime
import time
import threading
from random import randint
from channels.generic.websocket import JsonWebsocketConsumer
from django.template.loader import render_to_string
class EchoConsumer(WebsocketConsumer):
    """Plain-text demo consumer: greets the client, then streams the time."""

    def connect(self):
        """Event when client connects"""
        # Informs client of successful connection
        self.accept()
        # Send message to client
        self.send(text_data="You are connected by WebSockets!")
        # Fix: stop flag so the background thread ends when the client
        # leaves; the original looped forever and kept sending on a
        # closed socket.
        self.running = True

        # Send message to client every second
        def send_time(consumer):
            while consumer.running:
                consumer.send(text_data=str(datetime.now().strftime("%H:%M:%S")))
                time.sleep(1)

        # daemon=True so a lingering thread cannot block server shutdown.
        threading.Thread(target=send_time, args=(self,), daemon=True).start()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        self.running = False

    def receive(self, text_data):
        """Event when data is received"""
        pass
class BingoConsumer(JsonWebsocketConsumer):
    """JSON demo consumer: deals a bingo ticket, then streams random balls."""

    def connect(self):
        self.accept()
        ## Send numbers to client
        # Generates 5 random numbers, approximately, between 1 and 10
        # (set() collapses duplicates, so the ticket may be shorter).
        random_numbers = list(set([randint(1, 10) for _ in range(5)]))
        message = {
            'action': 'New ticket',
            'ticket': random_numbers
        }
        self.send_json(content=message)
        # Fix: stop flag so the ball thread ends on disconnect; the original
        # looped forever and kept sending on a closed socket.
        self.running = True

        ## Send balls
        def send_ball(consumer):
            while consumer.running:
                message = {
                    'action': 'New ball',
                    'ball': randint(1, 10)
                }
                consumer.send_json(content=message)
                # Sleep for 1 second
                time.sleep(1)

        # daemon=True so a lingering thread cannot block server shutdown.
        threading.Thread(target=send_ball, args=(self,), daemon=True).start()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        self.running = False

    def receive_json(self, data):
        """Event when data is received"""
        pass
class BMIConsumer(JsonWebsocketConsumer):
    """Receives height/weight JSON and replies with a rendered BMI fragment."""

    def connect(self):
        self.accept()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        pass

    def receive_json(self, data):
        """Event when data is received"""
        # Expects {"height": cm, "weight": kg}; a height of 0 would raise
        # ZeroDivisionError -- presumably validated client-side, TODO confirm.
        height = data['height'] / 100
        weight = data['weight']
        bmi = round(weight / (height ** 2), 1)
        self.send_json(
            content={
                "action": "BMI result",
                "html": render_to_string(
                    "components/_bmi_result.html",
                    {"height": height, "weight": weight, "bmi": bmi}
                )
            }
        )
|
PacktPublishing/Real-time-Django | chapter-6/project_template/asgi.py | <gh_stars>0
# project_template/asgi.py
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_template.settings")
from django.conf import settings
django.setup()
from django.core.asgi import get_asgi_application
from channels.security.websocket import OriginValidator
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from app.app_template.consumers import ExampleConsumer
# ASGI router: plain HTTP goes to Django, /ws/example/ goes to the WebSocket
# consumer; OriginValidator restricts WebSocket origins to ALLOWED_HOSTS.
application = ProtocolTypeRouter(
    {
        # Django's ASGI application to handle traditional HTTP requests
        "http": get_asgi_application(),
        # WebSocket handler
        "websocket": OriginValidator(AuthMiddlewareStack(
            URLRouter(
                [
                    re_path(r"^ws/example/$", ExampleConsumer.as_asgi()),
                ]
            )
        ), settings.ALLOWED_HOSTS)
    }
)
|
PacktPublishing/Real-time-Django | chapter-7/make_fake_data.py | <filename>chapter-7/make_fake_data.py<gh_stars>0
# ./manage.py shell < make_fake_data.py
from app.website.models import Post, Comment
from faker import Faker

# Delete all posts (their comments cascade via the Post foreign key)
Post.objects.all().delete()
# Create fake object
fake = Faker()
# Create 30 posts
for _ in range(30):
    post = Post(
        title=fake.sentence()[:200],
        # Fix: Faker has no fullname() provider; the method is name().
        author=fake.name()[:20],
        content=fake.text(),
    )
    post.save()
# Create 150 comments, each attached to a random post
for _ in range(150):
    comment = Comment(
        # Fix: the Comment model's field is "name", not "author".
        name=fake.name()[:20],
        content=fake.text(),
        post=Post.objects.order_by("?").first(),
    )
    comment.save()
|
PacktPublishing/Real-time-Django | chapter-6/app/app_template/views.py | <reponame>PacktPublishing/Real-time-Django
from django.shortcuts import render
from .forms import LoginForm, SignupForm
from django.contrib.auth.decorators import login_required
def home(request):
    """Render the home page."""
    return render(
        request,
        "base.html",
        {
            "page": "pages/home.html",
            "active_nav": "home",
        },
    )


def login(request):
    """Render the login page with an empty LoginForm."""
    return render(
        request,
        "base.html",
        {"page": "pages/login.html", "active_nav": "login", "form": LoginForm()},
    )


def signup(request):
    """Render the signup page with an empty SignupForm."""
    return render(
        request,
        "base.html",
        {"page": "pages/signup.html", "active_nav": "signup", "form": SignupForm()},
    )


@login_required
def profile(request):
    """Render the profile page; requires an authenticated user."""
    return render(
        request, "base.html", {"page": "pages/profile.html", "active_nav": "profile"}
    )


def page_not_found(request, exception):
    """Custom 404 handler rendered inside the base layout."""
    return render(request, "base.html", {"page": "pages/404.html"})
|
PacktPublishing/Real-time-Django | chapter-4/social-network_step_6/social_network/settings.py | """
Django settings for social_network project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DEBUG", "True") == "True"

# Fix: default to "" so a missing ALLOWED_HOSTS env var does not crash
# on .split() with AttributeError.
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "").split(",")

# Application definition
INSTALLED_APPS = [
    "channels",
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "app.website",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "social_network.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "social_network.wsgi.application"

# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": os.environ.get("DB_ENGINE"),
        "NAME": os.environ.get("DB_NAME"),
        "USER": os.environ.get("DB_USER"),
        # Fix: the source contained a redacted "<PASSWORD>(...)" token here
        # (a syntax error); restored to the env lookup used by every other key.
        "PASSWORD": os.environ.get("DB_PASSWORD"),
        "HOST": os.environ.get("DB_HOST"),
        "PORT": os.environ.get("DB_PORT"),
    }
}

# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]

# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_ROOT = os.environ.get("STATIC_ROOT")
STATIC_URL = os.environ.get("STATIC_URL")
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = os.environ.get("MEDIA_URL")

DOMAIN = os.environ.get("DOMAIN")
DOMAIN_URL = os.environ.get("DOMAIN_URL")
# NOTE(review): DOMAIN_URL is None when the env var is unset, which Django
# rejects at startup -- confirm deployment always provides it.
CSRF_TRUSTED_ORIGINS = [DOMAIN_URL]

"""EMAIL CONFIG"""
DEFAULT_FROM_EMAIL = os.environ.get("EMAIL_ADDRESS")
EMAIL_USE_TLS = os.environ.get("EMAIL_USE_TLS") == "True"
EMAIL_USE_SSL = os.environ.get("EMAIL_USE_SSL") == "True"
EMAIL_HOST = os.environ.get("EMAIL_HOST")
EMAIL_PORT = os.environ.get("EMAIL_PORT")
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD")

# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"

CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "channels_redis.core.RedisChannelLayer",
        "CONFIG": {
            "hosts": [(os.environ.get("REDIS_HOST"), os.environ.get("REDIS_PORT"))],
        },
    },
}
ASGI_APPLICATION = "social_network.asgi.application"
|
PacktPublishing/Real-time-Django | chapter-7/blog/urls.py | <gh_stars>0
from django.contrib import admin
from django.urls import path
from app.website import views

# Blog URL map; the names ("all_posts", "single", "about") are what
# reverse() resolves against.
urlpatterns = [
    path("", views.all_posts, name="all_posts"),
    path("article/<slug:slug>/", views.single, name="single"),
    path("about/", views.about, name="about"),
    path("admin/", admin.site.urls),
]
# Custom 404 handler rendering pages/404.html inside the base layout.
handler404 = "app.website.views.page_not_found"
PacktPublishing/Real-time-Django | chapter-4/initial-template/app/app_template/consumers.py | # app/app_template/consumers.py
from channels.generic.websocket import JsonWebsocketConsumer
from django.template.loader import render_to_string
from asgiref.sync import async_to_sync
class ExampleConsumer(JsonWebsocketConsumer):
    """Broadcast demo consumer: every client joins one shared group."""

    room_name = 'broadcast'

    def connect(self):
        """Event when client connects"""
        # Accept the connection
        self.accept()
        # Assign the Broadcast group
        async_to_sync(self.channel_layer.group_add)(self.room_name, self.channel_name)
        # Send data
        self.send_hello()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        # Remove from the Broadcast group
        async_to_sync(self.channel_layer.group_discard)(self.room_name, self.channel_name)

    def receive_json(self, data_received):
        """
        Event when data is received
        All information will arrive in 2 variables:
        'action', with the action to be taken
        'data' with the information
        """
        # Get the data
        data = data_received['data']
        # Depending on the action we will do one task or another.
        match data_received['action']:
            case 'example action':
                pass

    def send_html(self, event):
        """Event: Send html to client"""
        data = {
            'selector': event['selector'],
            'html': event['html'],
        }
        self.send_json(data)

    def send_hello(self):
        """Send list of messages to client"""
        # Render HTML and send to client; 'type': 'send.html' dispatches to
        # send_html() on every member of the broadcast group.
        async_to_sync(self.channel_layer.group_send)(
            self.room_name, {
                'type': 'send.html',  # Run 'send_html()' method
                'selector': '#main',
                'html': render_to_string("components/_welcome.html", {})
            }
        )
|
PacktPublishing/Real-time-Django | chapter-7/app/website/consumers.py | # app/website/consumers.py
from channels.generic.websocket import JsonWebsocketConsumer
import app.website.actions as actions
class ExampleConsumer(JsonWebsocketConsumer):
    """Routes client actions to the handlers in app.website.actions."""

    def connect(self):
        """Event when client connects"""
        # Accept the connection
        self.accept()
        # Make session task list
        if "tasks" not in self.scope["session"]:
            self.scope["session"]["tasks"] = []
            self.scope["session"].save()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        pass

    def receive_json(self, data_received):
        """
        Event when data is received
        All information will arrive in 2 variables:
        "action", with the action to be taken
        "data" with the information
        """
        # Get the data
        data = data_received["data"]
        # Depending on the action we will do one task or another.
        match data_received["action"]:
            case "Change page":
                actions.send_page(self, data)
            case "Next page":
                pass
            case "Add comment":
                pass

    def send_html(self, event):
        """Event: Send html to client"""
        data = {
            "selector": event["selector"],
            "html": event["html"],
            # Optional flags: append instead of replace, and a URL for the
            # client to push into browser history.
            "append": "append" in event and event["append"],
            "url": event["url"] if "url" in event else "",
        }
        self.send_json(data)
PacktPublishing/Real-time-Django | chapter-6/app/app_template/actions.py | from .forms import LoginForm, SignupForm
from asgiref.sync import async_to_sync
from django.template.loader import render_to_string
from django.urls import reverse
from channels.auth import login, logout
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from datetime import datetime
def send_page(self, page):
    """Render HTML and send page to client"""
    # Prepare context data for page
    context = {}
    match page:
        case "home":
            context = {"tasks": self.scope["session"]["tasks"] if "tasks" in self.scope["session"] else []}
        case "login":
            context = {"form": LoginForm()}
        case "signup":
            context = {"form": SignupForm()}
    # Add user to context if logged in
    if "user" in self.scope:
        context.update({ "user": self.scope["user"]})
    context.update({"active_nav": page})
    # Render HTML nav and send to client
    self.send_html({
        "selector": "#nav",
        "html": render_to_string("components/_nav.html", context),
    })
    # Render HTML page and send to client
    self.send_html({
        "selector": "#main",
        "html": render_to_string(f"pages/{page}.html", context),
        "url": reverse(page),
    })


def action_signup(self, data):
    """Sign up user"""
    form = SignupForm(data)
    # NOTE(review): duplicate check is by email only; a taken username with
    # a new email would pass -- confirm intended.
    user_exist = User.objects.filter(email=data["email"]).exists()
    if form.is_valid() and data["password"] == data["password_confirm"] and not user_exist:
        # Create user
        user = User.objects.create_user(data["username"], data["email"], data["password"])
        user.is_active = True
        user.save()
        # Send the user to the login page to authenticate
        send_page(self, "login")
    else:
        # Send form errors
        self.send_html({
            "selector": "#main",
            "html": render_to_string("pages/signup.html", {"form": form, "user_exist": user_exist, "passwords_do_not_match": data["password"] != data["password_confirm"]}),
            "append": False,
            "url": reverse("signup")
        })


def action_login(self, data):
    """Log in user"""
    form = LoginForm(data)
    # The custom EmailBackend accepts an email address as the username.
    user = authenticate(username=data["email"], password=data["password"])
    if form.is_valid() and user:
        async_to_sync(login)(self.scope, user)
        self.scope["session"].save()
        send_page(self, "profile")
    else:
        self.send_html({
            "selector": "#main",
            "html": render_to_string("pages/login.html", {"form": form, "user_does_not_exist": user is None}),
            "append": False,
            "url": reverse("login")
        })


def action_logout(self):
    """Log out user"""
    async_to_sync(logout)(self.scope)
    self.scope["session"].save()
    send_page(self, "login")


def add_lap(self):
    """Add lap to Home page"""
    # Send current time to client, appended below existing laps
    self.send_html({
        "selector": "#laps",
        "html": render_to_string("components/_lap.html", {"time": datetime.now()}),
        "append": True,
    })


def add_task(self, data):
    """Add task from TODO section"""
    # Update task list in the client
    self.send_html({
        "selector": "#todo",
        "html": render_to_string("components/_task-item.html", {"task": data["task"]}),
        "append": True,
    })
    # Persist the task in the session; assumes the consumer initialised
    # session["tasks"] on connect -- TODO confirm.
    self.scope["session"]["tasks"].append(data["task"])
    self.scope["session"].save()
PacktPublishing/Real-time-Django | chapter-5/app/chat/views.py | <gh_stars>0
from django.shortcuts import render
from django.contrib.auth.models import User
def index(request):
    """View with chat layout"""
    # All users are listed in the chat sidebar, ordered alphabetically.
    return render(
        request, "index.html", {"users": User.objects.all().order_by("username")}
    )
|
PacktPublishing/Real-time-Django | chapter-3/Sending plain text/hello_world/asgi.py | <gh_stars>0
# hello_world/asgi.py
import os
from django.core.asgi import get_asgi_application
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from app.simple_app.consumers import EchoConsumer
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hello_world.settings')
# ASGI router: plain HTTP goes to Django, /ws/echo/ goes to EchoConsumer.
application = ProtocolTypeRouter({
    # Django's ASGI application to handle traditional HTTP requests
    "http": get_asgi_application(),
    # WebSocket handler
    "websocket": AuthMiddlewareStack(
        URLRouter([
            re_path(r"^ws/echo/$", EchoConsumer.as_asgi()),
        ])
    ),
})
PacktPublishing/Real-time-Django | chapter-7/app/website/actions.py | from .models import Post, Comment
from .forms import CommentForm
from django.template.loader import render_to_string
from django.urls import reverse
POST_PER_PAGE = 5  # posts shown per page of the list view


def send_page(self, data={}):
    """Render HTML and send page to client"""
    # NOTE(review): mutable default argument; harmless as long as data is
    # never mutated, but worth replacing with data=None upstream.
    # Prepare context data for page
    page = data["page"]
    context = {}
    match page:
        case "list":
            context = {"posts": Post.objects.all().order_by("-created_at")[:POST_PER_PAGE]}
        case "single":
            context = {"post": Post.objects.get(id=data["id"]) ,"form": CommentForm()}
    # Render HTML nav and send to client
    self.send_html({
        "selector": "#nav",
        "html": render_to_string("components/_nav.html", context),
    })
    # Render HTML page and send to client
    # NOTE(review): reverse(page) needs URL names "list"/"single", but
    # urls.py registers "all_posts" and "single" (the latter requiring a
    # slug argument) -- "list" would raise NoReverseMatch; confirm naming.
    self.send_html({
        "selector": "#main",
        "html": render_to_string(f"pages/{page}.html", context),
        "url": reverse(page),
    })
PacktPublishing/Real-time-Django | chapter-6/app/app_template/forms.py | from django import forms
class LoginForm(forms.Form):
    """Login form: email (used as the username by EmailBackend) + password."""

    email = forms.CharField(
        label="Email",
        max_length=255,
        widget=forms.EmailInput(attrs={"id": "login-email", "class": "input"}),
    )
    password = forms.CharField(
        label="Password",
        max_length=255,
        widget=forms.PasswordInput(attrs={"id": "login-password", "class": "input"}),
    )
class SignupForm(forms.Form):
    """Registration form.

    password_confirm must match password; the comparison is done by the
    consumer action, not by the form itself.
    """

    username = forms.CharField(
        label="Username",
        max_length=255,
        widget=forms.TextInput(attrs={"id": "signup-username", "class": "input"}),
    )
    email = forms.EmailField(
        label="Email",
        max_length=255,
        widget=forms.EmailInput(attrs={"id": "signup-email", "class": "input"}),
    )
    password = forms.CharField(
        label="Password",
        max_length=255,
        widget=forms.PasswordInput(attrs={"id": "signup-password", "class": "input"}),
    )
    password_confirm = forms.CharField(
        # Fix: the source contained a redacted "<PASSWORD> Password" label.
        label="Confirm Password",
        max_length=255,
        widget=forms.PasswordInput(
            attrs={"id": "signup-password-confirm", "class": "input"}
        ),
    )
|
PacktPublishing/Real-time-Django | chapter-5/app/chat/consumers.py | <reponame>PacktPublishing/Real-time-Django
# app/chat/consumers.py
from channels.generic.websocket import JsonWebsocketConsumer
from django.template.loader import render_to_string
from asgiref.sync import async_to_sync
from channels.auth import login, logout
from django.contrib.auth.models import User
from .models import Client, Room, Message
class ChatConsumer(JsonWebsocketConsumer):
# At startup delete all clients
Client.objects.all().delete()
def connect(self):
"""Event when client connects"""
# Accept the connection
self.accept()
# Gets a random user not logged in
user = User.objects.exclude(
id__in=Client.objects.all().values("user")
).order_by("?").first()
# Login
async_to_sync(login)(self.scope, user)
self.scope["session"].save()
# Display the username
self.send_html(
{
"selector": "#logged-user",
"html": self.scope["user"].username,
}
)
# Register the client in the database to control who is connected.
Client.objects.create(user=user, channel=self.channel_name)
# Assign the group "hi", the first room that will be displayed when you enter
self.add_client_to_room("hi", True)
# List the messages
self.list_room_messages()
def disconnect(self, close_code):
"""Event when client disconnects"""
# Remove the client from the current room
self.remove_client_from_current_room()
# Deregister the client
Client.objects.get(channel=self.channel_name).delete()
# Logout user
logout(self.scope, self.scope["user"])
def receive_json(self, data_received):
"""
Event when data is received
All information will arrive in 2 variables:
"action", with the action to be taken
"data" with the information
"""
# Get the data
data = data_received["data"]
# Depending on the action we will do one task or another.
match data_received["action"]:
case "Change group":
if data["isGroup"]:
"""isGroup is True: Add to a multi-user room: #hi, #python..."""
self.add_client_to_room(data["groupName"], data["isGroup"])
else:
"""isGroup is False: Add to private room with the target user and the current user."""
# Gets the user to whom you are going to speak
user_target = User.objects.filter(username=data["groupName"]).first()
# Search for rooms where both users match
room = Room.objects.filter(users_subscribed__in=[self.scope["user"]], is_group=False).intersection(Room.objects.filter(users_subscribed__in=[user_target], is_group=False)).first()
if room and user_target and room.users_subscribed.count() == 2:
# An existing group has been found where both target and current users are already talking.
# The current user subscribes
self.add_client_to_room(room.name)
else:
# Looking for a room where the target user is alone.
room = Room.objects.filter(
users_subscribed__in=[
user_target,
],
is_group=False,
).last()
if room and room.users_subscribed.count() == 1:
# There is a room, let's join.
self.add_client_to_room(room.name)
else:
# We have not found any room where the target user is alone, we create a new room.
self.add_client_to_room()
# We inform the visitor in which room this
self.send_room_name()
case "New message":
# We received a new message to save
self.save_message(data["message"])
# Send the list of messages from the room
self.list_room_messages()
def send_html(self, event):
"""Event: Send html to client"""
data = {
"selector": event["selector"],
"html": event["html"],
}
self.send_json(data)
def list_room_messages(self):
"""List all messages from a group"""
room_name = self.get_name_room_active()
# Get the room
room = Room.objects.get(name=room_name)
# Get all messages from the room
messages = Message.objects.filter(room=room).order_by("created_at")
# Render HTML and send to client
async_to_sync(self.channel_layer.group_send)(
room_name, {
"type": "send.html", # Run "send_html()" method
"selector": "#messages-list",
"html": render_to_string("components/_list_messages.html", {"messages": messages})
}
)
def send_room_name(self):
"""Send the room name to the client"""
room_name = self.get_name_room_active()
room = Room.objects.get(name=room_name)
data = {
"selector": "#group-name",
# Concadena # if it is a group for aesthetic reasons
"html": ("#" if room.is_group else "") + room_name,
}
self.send_json(data)
def save_message(self, text):
"""Save a message in the database"""
# Get the room
room = Room.objects.get(name=self.get_name_room_active())
# Save message
Message.objects.create(
user=self.scope["user"],
room=room,
text=text,
)
def add_client_to_room(self, room_name=None, is_group=False):
"""Add customer to a room within Channels and save the reference in the Room model."""
# Get the user client
client = Client.objects.get(user=self.scope["user"])
# Remove the client from the previous room
self.remove_client_from_current_room()
# Get or create room
room, created = Room.objects.get_or_create(name=room_name, is_group=is_group)
# If it has no name, it is assigned "private_{id}"
# For example, if the id is 1, it shall be "private_1".
if not room.name:
room.name = f"private_{room.id}"
room.save()
room.clients_active.add(client)
room.users_subscribed.add(client.user)
room.save()
# Add client to room
async_to_sync(self.channel_layer.group_add)(room.name, self.channel_name)
# Send the group name to the client
self.send_room_name()
def get_name_room_active(self):
"""Get the name of the group from login user"""
room = Room.objects.filter(clients_active__user_id=self.scope["user"].id).first()
return room.name
def remove_client_from_current_room(self):
"""Remove client from current group"""
client = Client.objects.get(user=self.scope["user"])
# Get the current group
rooms = Room.objects.filter(clients_active__in=[client])
for room in rooms:
# Remove the client from the group
async_to_sync(self.channel_layer.group_discard)(room.name, self.channel_name)
# Remove the client from the Room model
room.clients_active.remove(client)
room.save() |
PacktPublishing/Real-time-Django | chapter-5/project_template/asgi.py | <reponame>PacktPublishing/Real-time-Django<filename>chapter-5/project_template/asgi.py
# project_template/asgi.py
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_template.settings")
django.setup()
from django.core.asgi import get_asgi_application
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from app.chat.consumers import ChatConsumer
# ASGI router: plain HTTP goes to Django, /ws/chat/ goes to ChatConsumer
# (AuthMiddlewareStack supplies the session/user in the consumer scope).
application = ProtocolTypeRouter(
    {
        # Django's ASGI application to handle traditional HTTP requests
        "http": get_asgi_application(),
        # WebSocket handler
        "websocket": AuthMiddlewareStack(
            URLRouter(
                [
                    re_path(r"^ws/chat/$", ChatConsumer.as_asgi()),
                ]
            )
        ),
    }
)
|
PacktPublishing/Real-time-Django | chapter-4/social-network_step_6/social_network/asgi.py | # social_network/asgi.py
import os
import django
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "social_network.settings")
django.setup()
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from app.website.consumers import SocialNetworkConsumer
# ASGI router: plain HTTP goes to Django, /ws/social-network/ goes to the
# social-network WebSocket consumer.
application = ProtocolTypeRouter(
    {
        # Django's ASGI application to handle traditional HTTP requests
        "http": get_asgi_application(),
        # WebSocket handler
        "websocket": AuthMiddlewareStack(
            URLRouter(
                [
                    re_path(r"^ws/social-network/$", SocialNetworkConsumer.as_asgi()),
                ]
            )
        ),
    }
)
|
PacktPublishing/Real-time-Django | chapter-4/social-network_step_1/app/website/consumers.py | # app/website/consumers.py
from channels.generic.websocket import JsonWebsocketConsumer
from django.template.loader import render_to_string
from .models import Message
from asgiref.sync import async_to_sync
class SocialNetworkConsumer(JsonWebsocketConsumer):
    """Step 1: persists incoming messages; all clients share one group."""

    room_name = 'broadcast'

    def connect(self):
        """Event when client connects"""
        # Accept the connection
        self.accept()
        # Assign the Broadcast group
        async_to_sync(self.channel_layer.group_add)(self.room_name, self.channel_name)

    def disconnect(self, close_code):
        """Event when client disconnects"""
        # Remove from the Broadcast group
        async_to_sync(self.channel_layer.group_discard)(self.room_name, self.channel_name)

    def receive_json(self, data_received):
        """
        Event when data is received
        All information will arrive in 2 variables:
        'action', with the action to be taken
        'data' with the information
        """
        # Get the data
        data = data_received['data']
        # Depending on the action we will do one task or another.
        match data_received['action']:
            case 'add message':
                # Add message to database
                Message.objects.create(
                    author=data['author'],
                    text=data['text'],
                )
PacktPublishing/Real-time-Django | chapter-3/Sending plain text/app/simple_app/consumers.py | <filename>chapter-3/Sending plain text/app/simple_app/consumers.py
# app/simple_app/consumers.py
from channels.generic.websocket import WebsocketConsumer
from datetime import datetime
import time
import threading
class EchoConsumer(WebsocketConsumer):
    """Plain-text demo consumer: greets the client, then streams the time."""

    def connect(self):
        """Event when client connects"""
        # Informs client of successful connection
        self.accept()
        # Send message to client
        self.send(text_data="You are connected by WebSockets!")
        # Fix: stop flag so the background thread ends when the client
        # leaves; the original looped forever and kept sending on a
        # closed socket.
        self.running = True

        # Send message to client every second
        def send_time(consumer):
            while consumer.running:
                consumer.send(text_data=str(datetime.now().strftime("%H:%M:%S")))
                time.sleep(1)

        # daemon=True so a lingering thread cannot block server shutdown.
        threading.Thread(target=send_time, args=(self,), daemon=True).start()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        self.running = False

    def receive(self, text_data):
        """Event when data is received"""
        pass
PacktPublishing/Real-time-Django | chapter-3/Sending JSON/app/simple_app/consumers.py | # app/simple_app/consumers.py
from channels.generic.websocket import WebsocketConsumer
from datetime import datetime
import time
import threading
from random import randint
from channels.generic.websocket import JsonWebsocketConsumer
class EchoConsumer(WebsocketConsumer):
    """Plain-text demo consumer: greets the client, then streams the time."""

    def connect(self):
        """Event when client connects"""
        # Informs client of successful connection
        self.accept()
        # Send message to client
        self.send(text_data="You are connected by WebSockets!")
        # Fix: stop flag so the background thread ends when the client
        # leaves; the original looped forever and kept sending on a
        # closed socket.
        self.running = True

        # Send message to client every second
        def send_time(consumer):
            while consumer.running:
                consumer.send(text_data=str(datetime.now().strftime("%H:%M:%S")))
                time.sleep(1)

        # daemon=True so a lingering thread cannot block server shutdown.
        threading.Thread(target=send_time, args=(self,), daemon=True).start()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        self.running = False

    def receive(self, text_data):
        """Event when data is received"""
        pass
class BingoConsumer(JsonWebsocketConsumer):
    """JSON demo consumer: deals a bingo ticket, then streams random balls."""

    def connect(self):
        self.accept()
        ## Send numbers to client
        # Generates 5 random numbers, approximately, between 1 and 10
        # (set() collapses duplicates, so the ticket may be shorter).
        random_numbers = list(set([randint(1, 10) for _ in range(5)]))
        message = {
            'action': 'New ticket',
            'ticket': random_numbers
        }
        self.send_json(content=message)
        # Fix: stop flag so the ball thread ends on disconnect; the original
        # looped forever and kept sending on a closed socket.
        self.running = True

        ## Send balls
        def send_ball(consumer):
            while consumer.running:
                message = {
                    'action': 'New ball',
                    'ball': randint(1, 10)
                }
                consumer.send_json(content=message)
                # Sleep for 1 second
                time.sleep(1)

        # daemon=True so a lingering thread cannot block server shutdown.
        threading.Thread(target=send_ball, args=(self,), daemon=True).start()

    def disconnect(self, close_code):
        """Event when client disconnects"""
        self.running = False

    def receive_json(self, data):
        """Event when data is received"""
        pass
PacktPublishing/Real-time-Django | chapter-5/make_fake_users.py | # ./manage.py shell < make_fake_users.py
from django.contrib.auth.models import User
from faker import Faker
fake = Faker()
# Start from a clean slate: delete all users.
User.objects.all().delete()
# Generate 5 unique random emails and iterate them.
# (An earlier comment said 30; ``range(5)`` is what the code actually does.)
for email in [fake.unique.email() for i in range(5)]:
    # Create user in database with a fixed placeholder password.
    user = User.objects.create_user(fake.user_name(), email, "password")
    user.last_name = fake.last_name()
    user.is_active = True
    user.save()
|
PacktPublishing/Real-time-Django | chapter-7/app/website/models.py | <reponame>PacktPublishing/Real-time-Django<gh_stars>0
from django.db import models
# https://github.com/tanrax/demo-HTML-over-WebSockets-in-Django
class Post(models.Model):
    """Blog post; ``title`` is unique and used as the display name."""
    title = models.CharField(max_length=200, unique=True)
    # Free-text author name (not a FK to auth.User).
    author = models.CharField(max_length=20)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.title
class Comment(models.Model):
    """Comment on a Post; ``name`` is the commenter's display name."""
    name = models.CharField(max_length=20)
    content = models.TextField()
    # Deleting the post removes its comments.
    post = models.ForeignKey(Post, on_delete= models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.name
PacktPublishing/Real-time-Django | chapter-5/app/chat/models.py | <reponame>PacktPublishing/Real-time-Django
from django.db import models
from django.contrib.auth.models import User
class Client(models.Model):
    """
    Clients for users: one row per live websocket connection (channel).
    """
    # A user may have several simultaneous clients (tabs/devices).
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Channels channel name; null until a socket is attached.
    channel = models.CharField(max_length=200, blank=True, null=True, default=None)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.user.username
class Room(models.Model):
    """
    Rooms for users: links subscribed users and their currently active clients.
    """
    users_subscribed = models.ManyToManyField(User, related_name="users_subscribed")
    clients_active = models.ManyToManyField(Client, related_name="clients_active")
    # Optional display name; group rooms typically have one, 1:1 rooms may not.
    name = models.CharField(max_length=255, blank=True, null=True, default=None)
    is_group = models.BooleanField(default=False)

    def __str__(self):
        # ``name`` is nullable; __str__ must return a str (returning None raises
        # TypeError in Django admin/shell), so fall back to the primary key.
        return self.name or "Room {0}".format(self.pk)
class Message(models.Model):
    """
    Messages for users: one chat message by a user inside a room.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    room = models.ForeignKey(Room, on_delete=models.CASCADE)
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.text
|
korda/gitlab-client | gitlab_api_client.py | import json
import urllib.parse
import urllib.request
import urllib.error
class GitlabApi:
    """Minimal GitLab v4 REST client built on urllib.

    url          -- base URL of the GitLab instance (e.g. ``https://gitlab.example.com``)
    token        -- private token sent as the ``Private-Token`` header
    checkout_url -- base URL used to build git checkout addresses
    """

    def __init__(self, url: str, token: str, checkout_url: str):
        self.__url = url
        self.__token = token
        self.__checkout_url = checkout_url
        self.__api_root = '/api/v4'

    def repo_url(self, path: str) -> str:
        """Return the git checkout URL for project *path*."""
        return self.__checkout_url + '/' + path.lstrip('/')

    def get_namespace(self, namespace: str):
        """Return the namespace object; the full path is quoted into one URL segment."""
        return self.__call('GET', f"namespaces/{urllib.parse.quote(namespace, safe='')}")

    def create_project(self, path: str, namespace_id):
        """Create a private project *path* under *namespace_id* and return the API response."""
        return self.__call('POST', "projects", path=path, namespace_id=namespace_id, visibility="private")

    def projects(self, search: str):
        """Return all non-archived projects (optionally filtered by *search*), following pagination."""
        if search:
            # Typo fixed: was "This make take few moments."
            print("Searching projects containing %s... This may take a few moments." % search)
        else:
            print("Getting list of all projects... This may take a few moments.")
        projects = []
        per_page = 100
        current_page = 1
        while True:
            current_page_projects = self.__call('GET', "projects",
                                                per_page=per_page,
                                                page=current_page,
                                                simple=True,
                                                archived=False,
                                                search=search
                                                )
            current_page += 1
            projects += current_page_projects
            print(f"{len(projects)} projects retrieved so far...")
            # A short (or empty) page means the last page has been reached.
            if len(current_page_projects) < per_page:
                break
        return projects

    def __call(self, method, api_path, **kwargs):
        """Issue *method* on *api_path* with *kwargs* as query params; return parsed JSON."""
        query = ''
        if kwargs:
            query = '?' + urllib.parse.urlencode(kwargs)
        request = urllib.request.Request(self.__url + self.__api_root + "/" + api_path.lstrip("/") + query)
        request.add_header('Private-Token', self.__token)
        request.method = method
        return json.loads(urllib.request.urlopen(request).read())
|
korda/gitlab-client | open_project.py | <filename>open_project.py
import argparse
from os.path import isdir
from pathlib import Path
from subprocess import check_call
from curses_select import select_option
from gitlab_api_client import GitlabApi
from user_config import get_gitlab_api_client
from user_config import get_project_dir_location
def open_project_action(main_args, progname: str):
    """Handle the ``open`` sub-command: let the user pick a project and check it out."""
    instance = main_args.gitlab_instance
    subparser = argparse.ArgumentParser(
        description='Open project action',
        prog=f'{progname} gitlab_instance open')
    subparser.add_argument(
        '--save-dir-to', dest='saveDirTo',
        help='dir path with checked out project will be stored in order to use in '
             'bash function to this location')
    subparser.add_argument(
        '--search', dest='search', nargs='?', const='', type=str, default='', required=False,
        help='search phrase to narrow projects list')
    parsed = subparser.parse_args(main_args.args)
    client = get_gitlab_api_client(instance)
    base_dir = get_project_dir_location()
    checkout_dir = __open_project(instance, client, base_dir, parsed.search)
    # Optionally record the checkout directory for a shell wrapper to `cd` into.
    if parsed.saveDirTo:
        Path(parsed.saveDirTo).write_text(checkout_dir + "\n")
def __open_project(gitlab_instance: str, gitlab_api_client: GitlabApi, project_dir: str, search: str):
    """Interactively select a project, clone it if absent, and return its local path."""
    names = sorted(project["path_with_namespace"] for project in gitlab_api_client.projects(search))
    choice = select_option(names)
    if not choice:
        print("No project selected!")
        quit(1)
    print(f"Selected {choice}")
    target = str(Path(project_dir) / gitlab_instance / choice)
    # Only clone when the project has not been checked out before.
    if not isdir(target):
        clone_url = gitlab_api_client.repo_url(choice)
        print(f"Checking out project from {clone_url}")
        check_call(args=['git', 'clone', clone_url, target])
    return target
|
korda/gitlab-client | user_config.py | import json
from os.path import isdir
from os import mkdir
from pathlib import Path
from gitlab_api_client import GitlabApi
config_location = Path.home() / ".gitlab-client.json"
def get_gitlab_api_client(gitlab_instance) -> GitlabApi:
    """Build a GitlabApi from the saved (or interactively created) instance config."""
    instance_cfg = __get_gitlab_instance_config(gitlab_instance)
    return GitlabApi(
        instance_cfg["url"],
        instance_cfg["token"],
        instance_cfg["checkout_url"],
    )
def get_project_dir_location() -> str:
    """Return the configured project checkout directory, prompting for and
    persisting it on first use, and creating it on disk if missing."""
    config = __load_config()
    project_dir_key = 'project_dir'
    if project_dir_key not in config:
        # .strip() replaces the old .lstrip().rstrip() chain.
        provided_dir = input("Please provide directory for project checkout: ").strip().rstrip("/")
        # Relative answers are anchored in the user's home directory.
        if not provided_dir.startswith("/"):
            provided_dir = Path.home() / provided_dir
        else:
            provided_dir = Path(provided_dir)
        print(f"Saving {provided_dir} as project checkout directory...")
        config[project_dir_key] = str(provided_dir.absolute())
        __save_config(config)
    # Create the directory (and any missing parents) on demand; the previous
    # os.mkdir call failed when intermediate directories were absent.
    Path(config[project_dir_key]).mkdir(parents=True, exist_ok=True)
    return config[project_dir_key]
def __get_gitlab_instance_config(gitlab_instance):
    """Return the config dict for *gitlab_instance*, interactively prompting for
    any missing pieces (url, token, checkout_url) and persisting them."""
    config = __load_config()
    gitlab_instances_key = 'gitlab_instances'
    if gitlab_instances_key not in config:
        config[gitlab_instances_key] = {}
    if gitlab_instance not in config[gitlab_instances_key]:
        provided_url = input("Please provide url to gitlab: ").lstrip().rstrip().rstrip("/")
        provided_token = input("Please provide access token to gitlab: ").lstrip().rstrip()
        config[gitlab_instances_key][gitlab_instance] = {
            "url": provided_url,
            "token": provided_token
        }
        __save_config(config)
    # checkout_url may be missing for configs created by an older version.
    if "checkout_url" not in config[gitlab_instances_key][gitlab_instance]:
        default_url = config[gitlab_instances_key][gitlab_instance]["url"]
        # Suggest an ssh checkout base derived from the http(s) URL.
        default_url = default_url.replace("https://", "").replace("http://", "")
        default_url = f"ssh://git@{default_url}"
        checkout_url = input(f"Please provide url base for checkout [{default_url}]: ").lstrip().rstrip().rstrip("/")
        if not checkout_url:
            checkout_url = default_url
        config[gitlab_instances_key][gitlab_instance]["checkout_url"] = checkout_url
        __save_config(config)
    return config[gitlab_instances_key][gitlab_instance]
def __save_config(config):
    """Pretty-print *config* to the user's config file, ending with a newline."""
    serialized = json.dumps(config, indent=4)
    config_location.write_text(serialized + "\n")
def __load_config():
    """Read and parse the config file, seeding an empty one first if missing."""
    __ensure_config_file_exists()
    raw = config_location.read_text()
    return json.loads(raw)
def __ensure_config_file_exists():
    """Create the config file with an empty JSON object when it does not exist."""
    if config_location.is_file():
        return
    config_location.write_text('{}\n')
|
korda/gitlab-client | create_project.py | import argparse
from gitlab_api_client import GitlabApi
from user_config import get_gitlab_api_client
from subprocess import check_call
def create_project_action(main_args, progname: str):
    """Handle the ``create`` sub-command: create a project on the chosen instance."""
    instance = main_args.gitlab_instance
    subparser = argparse.ArgumentParser(description='Create new project',
                                        prog=f'{progname} gitlab_instance create')
    subparser.add_argument('path',
                           help='path of project to create')
    parsed = subparser.parse_args(main_args.args)
    client = get_gitlab_api_client(instance)
    __create_project(client, parsed.path)
def __create_project(gitlab_api_client: GitlabApi, path: str):
    """Create *path* on GitLab and add it as the ``origin`` remote of the current repo."""
    # Last path segment is the project name; everything before it is the namespace.
    *group_parts, project_name = path.split("/")
    namespace = gitlab_api_client.get_namespace("/".join(group_parts))
    repo = gitlab_api_client.create_project(project_name, namespace['id'])
    print(f"Created repo with url {repo['ssh_url_to_repo']}")
    print(f"Gitlab link: {repo['web_url']}")
    check_call(args=['git', 'remote', 'add', 'origin', repo['ssh_url_to_repo']])
|
korda/gitlab-client | __main__.py | <reponame>korda/gitlab-client
import argparse
from open_project import open_project_action
from create_project import create_project_action
from user_config import config_location
if __name__ == '__main__':
    # CLI entry point: <prog> <gitlab_instance> <open|create> [args...]
    parser = argparse.ArgumentParser(description='Gitlab client.')
    parser.add_argument('gitlab_instance', help='you can have multiple gitlab instances to connect to, this argument '
                                                'is required to determine which one to use. if instance is not '
                                                'configured you will be asked to provide configuration with prompt. '
                                                f'configurations are saved in in file {config_location}.')
    parser.add_argument('action', help='action to execute', choices=['open', 'create'])
    # Remaining args are forwarded verbatim to the sub-command's own parser.
    parser.add_argument('args', nargs=argparse.REMAINDER)
    main_args = parser.parse_args()
    if main_args.action == 'open':
        open_project_action(main_args, parser.prog)
    elif main_args.action == 'create':
        create_project_action(main_args, parser.prog)
    else:
        # Unreachable in practice: argparse already rejects values outside `choices`.
        print(f"Unsupported action {main_args.action}")
        quit(1)
|
korda/gitlab-client | curses_select.py | <filename>curses_select.py
import time
import math
import string
from curses import wrapper
import curses
def select_option(options_list):
    """Run the interactive curses picker over *options_list*; return the chosen item or None."""
    return wrapper(_select_option, options_list)
def _select_option(stdscr, options_list):
    """Curses UI: a grid of options with incremental search.

    Arrow keys move the selection, printable characters narrow the list,
    backspace widens it, Enter returns the highlighted option (or None when
    the filtered list is empty).
    """
    stdscr.clear()
    select_size = 50  # column width in characters
    size_of_room_above_search = 2  # rows reserved for the search line
    selected_x = 0
    selected_y = 0
    old_scr_height = None
    old_scr_width = None
    search_string = ''
    filtered_options_list = list(_filter_list(options_list, search_string))
    old_filtered_options_list = filtered_options_list.copy()
    options_grid = None
    row_height = None
    last_col_row_height = None
    col_width = None
    while True:
        scr_height, scr_width = stdscr.getmaxyx()
        # Re-layout when the terminal was resized or the filter changed.
        reset_necessary = False
        if scr_height != old_scr_height or scr_width != old_scr_width:
            old_scr_width = scr_width
            old_scr_height = scr_height
            reset_necessary = True
        if filtered_options_list != old_filtered_options_list:
            old_filtered_options_list = filtered_options_list.copy()
            reset_necessary = True
        if reset_necessary:
            options_grid = list(_split_list(filtered_options_list, max(1, math.floor(scr_width / select_size))))
            row_height = len(options_grid[0])
            last_col_row_height = len(options_grid[len(options_grid)-1])
            col_width = len(options_grid)
            selected_x = 0
            selected_y = 0
        # Scroll vertically just enough to keep the selection on screen.
        y_offset = min(max((row_height+size_of_room_above_search)-scr_height, 0), selected_y)
        stdscr.erase()
        for x in range(0, col_width):
            for y in range(0, len(options_grid[x])):  # the last column may be shorter, so always use this column's own length
                draw_y = y - y_offset + size_of_room_above_search
                if draw_y >= scr_height or draw_y < size_of_room_above_search:
                    continue
                elif selected_x == x and selected_y == y:
                    # Highlight the current selection with reverse video.
                    stdscr.addnstr(draw_y, x*select_size, options_grid[x][y], select_size-2, curses.A_REVERSE)
                else:
                    stdscr.addnstr(draw_y, x*select_size, options_grid[x][y], select_size-2)
        stdscr.addnstr(0, 0, search_string, scr_width-2)
        stdscr.refresh()
        char = _get_char(stdscr)
        if char == curses.KEY_RIGHT:
            selected_x = (selected_x+1) % col_width
        if char == curses.KEY_LEFT:
            selected_x = (selected_x-1) % col_width
        if char == curses.KEY_DOWN:
            # The last column can be shorter, so it wraps on its own height.
            if selected_x == col_width-1:
                selected_y = (selected_y+1) % last_col_row_height
            else:
                selected_y = (selected_y+1) % row_height
        if char == curses.KEY_UP:
            if selected_x == col_width-1:
                selected_y = (selected_y-1) % last_col_row_height
            else:
                selected_y = (selected_y-1) % row_height
        if char == curses.KEY_HOME:
            selected_x = 0
            selected_y = 0
        if char == curses.KEY_END:
            selected_x = col_width-1
            selected_y = row_height-1
        # Clamp a selection that landed past the end of the short last column.
        if selected_x == col_width-1 and selected_y >= last_col_row_height:
            selected_y = last_col_row_height-1
        if char == ord('\n'):
            if filtered_options_list:
                return options_grid[selected_x][selected_y]
            else:
                return None
        elif chr(char) in string.printable:
            search_string += chr(char)
            filtered_options_list = list(_filter_list(options_list, search_string))
        elif char == curses.KEY_BACKSPACE:
            search_string = search_string[:-1]
            filtered_options_list = list(_filter_list(options_list, search_string))
def _filter_list(l, search):
for s in l:
if not search or search.lower() in s.lower():
yield s
def _get_char(stdscr):
stdscr.nodelay(1)
while True:
char = stdscr.getch()
if char != -1:
return char
time.sleep(0.01)
def _split_list(l, n):
    """Partition *l* into *n* columns of (ceiling-divided) equal size."""
    per_column = -(-len(l) // n)  # ceiling division
    return _chunks(l, per_column)
def _chunks(l, n):
if len(l) > 1:
for i in range(0, len(l), n):
yield l[i:i + n]
else:
yield l
|
dacosta2213/acuaponia | acuaponia/api.py | <filename>acuaponia/api.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.utils
import frappe.async
import frappe.sessions
import frappe.utils.file_manager
import frappe.desk.form.run_method
from frappe.utils.response import build_response
import datetime
from datetime import date,datetime
import requests
import json
import pytz
@frappe.whitelist()
def ruta(login_manager):
    """After login, redirect the user to the page stored on their User record."""
    destino = frappe.db.get_value("User", login_manager.user, "ruta_login")
    frappe.errprint(destino)
    frappe.local.response["home_page"] = destino
@frappe.whitelist(allow_guest=True)
def addlectura(apikey=None, field1=0, field2=0, field3=0, field4=0):
    """Agregar Lectura: store a sensor reading and alert on the first out-of-range field.

    apikey       -- container (Warehouse) name, also used as the reading's ``contenedor``
    field1..4    -- raw sensor values
    Returns an out-of-range alert message for the first offending field (after
    sending SMS + mail), or a "reading in range" message.
    """
    tz = pytz.timezone('US/Central')
    now = datetime.now(tz)
    # Parameterized insert; timestamp doubles as the row name.
    frappe.db.sql("insert into tabLectura (name,creation,field1,field2,field3,field4,contenedor) values (%s,%s,%s,%s,%s,%s,%s)", (now, now, field1, field2, field3, field4, apikey))
    frappe.db.commit()
    # Alert thresholds come from the container's species record.
    cont = frappe.get_doc("Warehouse", apikey)
    especie = frappe.get_doc("Especie", cont.especie)
    # (value, min, max) per sensor field, replacing the four copy-pasted checks.
    # Only the FIRST out-of-range field alerts, matching the original if/elif chain.
    checks = [
        (float(field1), especie.min1, especie.max1),
        (float(field2), especie.min2, especie.max2),
        (float(field3), especie.min3, especie.max3),
        (float(field4), especie.min4, especie.max4),
    ]
    for value, lo, hi in checks:
        if value < lo or value > hi:
            enviarsms(apikey, value, lo, hi)
            enviarmail(apikey, value, lo, hi)
            return('ALERTA - Lectura fuera de rango: ' + str(value) + '. MIN: ' + str(lo) + ' MAX: ' + str(hi) )
    return('Lectura en rango.')
@frappe.whitelist()
def enviarsms(contenedor, f1, min1, max1):
    """Send an out-of-range SMS alert for *contenedor* through the SMS gateway.

    Gateway credentials (token, url, device_id) come from the
    "Defaults Acuaponia" doctype; the destination number from the
    Warehouse's ``alerta_sms`` field.
    """
    d = frappe.get_doc("Warehouse", contenedor)
    phone_number = d.alerta_sms
    c = frappe.get_doc("Defaults Acuaponia", "Defaults Acuaponia")
    token = c.token
    url = c.url
    device_id = c.device_id
    message = 'Alerta en contenedor: ' + contenedor + '. Lectura fuera de rango: ' + str(f1) + '. MIN: ' + str(min1) + ' MAX: ' + str(max1)
    # Serialize with json.dumps instead of string concatenation: the hand-built
    # JSON broke whenever a value contained quotes or backslashes.
    payload = json.dumps([{
        "phone_number": phone_number,
        "message": message,
        "device_id": device_id,
    }])
    headers = {
        'Authorization': token
    }
    # Fire-and-forget: the response is intentionally ignored, as before.
    response = requests.request("POST", url, data=payload, headers=headers)
@frappe.whitelist()
def enviarmail(contenedor, f1, min1, max1):
    """Email an out-of-range alert for *contenedor* to its configured address."""
    warehouse = frappe.get_doc("Warehouse", contenedor)
    body = 'Lectura fuera de rango en ' + contenedor + '. Lectura recibida: ' + str(f1) + '. MIN: ' + str(min1) + ' MAX: ' + str(max1)
    frappe.sendmail(
        ["{0}".format(warehouse.alerta_email)],
        subject="Alerta en: {0}.".format(contenedor),
        content=body,
        delayed=False)
    # A copy of every alert also goes to the maintainer.
    frappe.sendmail(['<EMAIL>'], subject="Enviar SMS", content='activo', delayed=False)
|
dacosta2213/acuaponia | acuaponia/acuaponia/doctype/dash_acuaponia/dash_acuaponia.py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DashAcuaponia(Document):
    """DocType controller for the aquaponics dashboard; no custom behavior."""
    pass
@frappe.whitelist(allow_guest=True)
def datos(contenedor):
    """Return every reading for *contenedor*, oldest first, as plain dicts.

    NOTE(review): exposed with allow_guest=True, so readings are publicly
    readable — confirm that is intended.
    """
    lecturas = frappe.get_list(
        "Lectura",
        fields=["creation", "field1", "field2", "field3", "field4"],
        order_by="creation asc",
        filters={"contenedor": contenedor},
    )
    return [
        {
            "creation": lectura.creation,
            "field1": lectura.field1,
            "field2": lectura.field2,
            "field3": lectura.field3,
            "field4": lectura.field4,
        }
        for lectura in lecturas
    ]
|
marl/jams | setup.py | <reponame>marl/jams
from setuptools import setup, find_packages
import imp
# Load jams/version.py as a module so `version.version` can be read without
# importing the (possibly not-yet-installed) jams package itself.
# NOTE(review): the `imp` module is deprecated (removed in Python 3.12);
# importlib is the replacement — confirm the supported interpreter range first.
version = imp.load_source('jams.version', 'jams/version.py')

setup(
    name='jams',
    version=version.version,
    description='A JSON Annotated Music Specification for Reproducible MIR Research',
    author='<NAME>',
    url='http://github.com/marl/jams',
    download_url='http://github.com/marl/jams/releases',
    packages=find_packages(),
    # Ship the JSON schema files alongside the code.
    package_data={'': ['schemata/*.json',
                       'schemata/namespaces/*.json',
                       'schemata/namespaces/*/*.json']},
    long_description='A JSON Annotated Music Specification for Reproducible MIR Research',
    classifiers=[
        "License :: OSI Approved :: ISC License (ISCL)",
        "Programming Language :: Python",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8"
    ],
    keywords='audio music json',
    license='ISC',
    install_requires=[
        'pandas',
        'sortedcontainers>=2.0.0',
        # pyrsistent dropped 3.4 support after 0.14.x.
        'pyrsistent<0.15; python_version=="3.4"',
        'jsonschema>=3.0.0',
        'numpy>=1.8.0',
        'six',
        'decorator',
        'mir_eval>=0.5',
    ],
    extras_require={
        'display': ['matplotlib>=1.5.0'],
        'tests': ['pytest < 4', 'pytest-cov'],
    },
    scripts=['scripts/jams_to_lab.py']
)
|
rohit-k-das/proofpoint-emailbypass-catcher | main.py | import proofpointTAP.proofpointTAP as proofpointTAP
import mailApp.MicrosoftOutlookMail as MicrosoftOutlookMail
import mailApp.Gmail as Gmail
import mailApp.Directory as directory_api
import logging
import datetime
import argparse
import requests
import os
import json
import urllib.parse
import ConfigParser
# Module-level caches, intended to be filled at runtime.
active_users = []
email_dls = {}
aliases = []
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# Settings are read from settings.ini next to this file.
# NOTE(review): `import ConfigParser` (top of file) is the Python 2 module name
# while `urllib.parse` used elsewhere is Python 3 only — the two cannot both
# work on one interpreter; Python 3 needs `import configparser`.
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'settings.ini'))
google_user_for_service_account = Config.get('Settings', 'Google_User_For_Project')
# Create your own slackbot
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')
# Current configuration checks for users in the company from Microsoft. You can switch to Google by uncommenting the lines below
def filter_recipient(recipients, access_token):
    """Expand recipient address(es) into the verified company users they map to.

    Accepts a single address or a list of addresses; returns whatever the
    Microsoft lookup yields (flattened for list input).
    A Google Directory variant exists in this module's history and is kept
    below as disabled comments; *access_token* is only used by that variant.
    """
    if isinstance(recipients, list):
        expanded = []
        for address in recipients:
            matches = MicrosoftOutlookMail.recipient_exits_check(address)
            if matches:
                expanded.extend(matches)
            # Google variant (disabled):
            # matches = directory_api.recipient_exits_check(address, access_token)
            # if matches:
            #     expanded.extend(matches)
        return expanded
    # Single address: return the lookup result directly.
    # Google variant (disabled):
    # return directory_api.recipient_exits_check(recipients, access_token)
    return MicrosoftOutlookMail.recipient_exits_check(recipients)
# Collect the extracted malicious emails as per Proofpoint that were allowed
def parse_emails_per_threat_association(events):
    """Build ProofpointEmailDetail objects from allowed-threat TAP events.

    ``messagesDelivered`` events are mails (possibly with attachments) that
    Proofpoint let through; ``clicksPermitted`` are allowed clicks on rewritten
    URLs.  Returns a list of ProofpointEmailDetail (possibly empty), or None
    when no valid Outlook access token could be obtained.
    """
    threats = []  # A list to hold ProofpointEmailDetail objects
    if 'messagesDelivered' in events or 'clicksPermitted' in events:
        # Generate oauth tokens to be used by Outlook API or Google API
        MicrosoftOutlookMail.oauth_access_token, MicrosoftOutlookMail.expiry_time = MicrosoftOutlookMail.generate_access_token()
        # Bail out unless a token exists AND has not expired.  The previous
        # guard (`not token and expiry > now`) could never catch an expired
        # token, since it required the expiry to still be in the future.
        token_valid = (MicrosoftOutlookMail.oauth_access_token
                       and MicrosoftOutlookMail.expiry_time > datetime.datetime.now())
        if not token_valid:
            logger.critical('Unable to generate access token. Exiting..')
            return
        access_token, expiry = directory_api.generate_directory_api_access_token(google_user_for_service_account)
        # Mails with attachments that were not blocked
        if 'messagesDelivered' in events:
            if events['messagesDelivered']:
                logger.info('Extracting messages delivered in Proofpoint.')
                for event in events['messagesDelivered']:
                    Pobj = proofpointTAP.ProofpointEmailDetail()
                    # Only parse if threatsInfoMap has some data
                    if event['threatsInfoMap']:
                        # Get Campaign Name and Threat ID
                        Pobj.get_campaign_name_from_message(event['threatsInfoMap'])
                        # Receivers (To + Cc), filtered to known company users
                        recipient = filter_recipient(event['recipient'], access_token)
                        if recipient:
                            Pobj.recipient.extend(recipient)
                        if 'ccAddresses' in event:
                            recipients = filter_recipient(event['ccAddresses'], access_token)
                            if recipients:
                                Pobj.recipient.extend(recipients)
                        # Get email subject
                        Pobj.subject = event['subject']
                        # headerFrom looks like "Name <address>"; keep the address only.
                        Pobj.sender = event['headerFrom'].split('<')
                        Pobj.sender = Pobj.sender[len(Pobj.sender) - 1].strip('>')
                        Pobj.sender_IP = event['senderIP']
                        # has_attachments is presumably set by
                        # get_campaign_name_from_message — TODO confirm.
                        if Pobj.has_attachments:
                            for attachment in event['messageParts']:
                                if attachment['disposition'] == 'attached':
                                    Pobj.attachments[attachment['filename']] = attachment['contentType']
                        threats.append(Pobj)
        # Mails that are mostly phishing links which were not blocked
        if 'clicksPermitted' in events:
            if events['clicksPermitted']:
                logger.info('Extracting clicks permitted in Proofpoint.')
                for event in events['clicksPermitted']:
                    Pobj = proofpointTAP.ProofpointEmailDetail()
                    # Only parse if mail is associated with a threatID
                    if 'threatID' in event:
                        Pobj.get_campaign_name_from_clicks(event)
                        recipient = filter_recipient(event['recipient'], access_token)
                        if recipient:
                            # filter_recipient returns a list; the old `append`
                            # nested the whole list inside Pobj.recipient
                            # (unlike the messagesDelivered branch) — `extend`
                            # keeps the structure flat and consistent.
                            Pobj.recipient.extend(recipient)
                        Pobj.sender = event['sender']
                        Pobj.sender_IP = event['senderIP']
                        threats.append(Pobj)
    return threats
# Send alert with msg to slack via hubot
'''
Use POST with Body
'''
def send_alert_via_hubot(campaign, campaign_fields, number_of_users):
    """Format a campaign summary and POST it to Slack via the hubot webhook.

    campaign        -- campaign display name (used in the threat link text)
    campaign_fields -- dict with ThreatID / Subject / Recipients / Senders /
                       Attachments / EmailPulls / 'IOCs clicked or downloaded'
    number_of_users -- total number of recipients for the campaign
    Exits the process with -1 when the webhook cannot be reached.
    """
    # Summarize the per-recipient email-pull outcomes into status emoji tags.
    alerts = []
    email_pull_messages = []
    if campaign_fields['EmailPulls']:
        for message in campaign_fields['EmailPulls']:
            email_pull_messages.append(message)
            if 'Email Pull successful' in message:
                alerts.append(':green-alert: Email Pull Successful')
            if 'already deleted the mail' in message:
                alerts.append(':green-alert: Already Deleted')
            if 'Unable to delete mail' in message:
                alerts.append(":red-alert: Didn't Delete/Pull")
            if 'Ran into error.' in message:
                alerts.append(":red-alert: Script Failed")
            if 'Email not found' in message:
                alerts.append(":amber-alert: Not Found")
    if campaign_fields['Attachments']:
        alerts.append(":mail-attachment: Attachment Found")
    # De-duplicate the status tags (order is not preserved).
    alerts = list(set(alerts))
    # Message to send in the alert
    # Filter charactes & < > from message as slack will not be able to handle threat
    message_to_send = ":malicious-email: Alert: <https://threatinsight.proofpoint.com/14f465d2-8daf-445c-52a9-fec245f2d609/threat/email/%s|%s> ---> %s\n" % (campaign_fields['ThreatID'], campaign, str(alerts).strip('[').strip(']').replace('\'', ''))
    if campaign_fields['Subject']:
        message_to_send = "%sSubject: %s\n" % (message_to_send, str(campaign_fields['Subject']).strip('[').strip(']').encode('utf-8').decode('utf-8').replace('\'', '').replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
    message_to_send = "%sTotal Recipients: %d\n" % (message_to_send, number_of_users)
    message_to_send = "%sRecipients: %s\n" % (message_to_send, str(campaign_fields['Recipients']).strip('[').strip(']').replace('\'', ''))
    message_to_send = "%sSenders: %s\n" % (message_to_send, str(campaign_fields['Senders']).strip('{').strip('}').replace('\'', ''))
    if campaign_fields['Attachments']:
        message_to_send = "%sAttachments: %s\n" % (message_to_send, str(campaign_fields['Attachments']).strip('{').strip('}').replace("'", "").replace('\\\'', '\'').replace('\\\"', '\"').replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
    if email_pull_messages:
        message_to_send = "\n%sEmail Pull Report:\n" % message_to_send
        for message in email_pull_messages:
            message = message.replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;').replace('", "', "\n").replace('\\\'', '\'').replace('\\\"', '\"')
            # Strip one pair of surrounding double quotes, when present.
            if message[0] == '"':
                message = message[1:]
            if message[len(message)-1] == '"':
                message = message[:len(message)-1]
            message_to_send = "%s%s\n" % (message_to_send, message)
    else:
        message_to_send = "%s%s\n" % (message_to_send, 'No email pull messages unfortunately. Something is wrong.')
    if campaign_fields['IOCs clicked or downloaded']:
        message_to_send = "\n%sIOCs Report:\n" % message_to_send
        for message in campaign_fields['IOCs clicked or downloaded']:
            if message:
                message_to_send = "%s%s\n" % (message_to_send, message.replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
    # Whom to send the alert
    send_to = 'Your channel or username'
    data = {'message': message_to_send, 'users': send_to}
    data = urllib.parse.urlencode(data)
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    resp = requests.post(hubot_webhook_url, headers=headers, data=data)
    if resp.ok:
        logger.info("Sent alert to user/channel %s" % send_to)
    else:
        logger.critical("Unable to connect to hubot.")
        logger.info("Hubot Error %d:%s" % (resp.status_code, resp.text))
        exit(-1)
# Parse command line arguments
def parse_options():
    """Parse command-line options.

    Returns the parsed namespace with:
      interval  -- look-back window in minutes (default 15)
      threshold -- per-campaign alert threshold (default 4)
    """
    parser = argparse.ArgumentParser(description='This is a proofpoint alerter script')
    parser.add_argument("-i", "--interval", action="store", default=15, type=int,
                        dest='interval', help="Interval in minutes to look back to fetch mails")
    parser.add_argument("-t", "--threshold", action="store", default=4, type=int,
                        dest='threshold', help="Threshold after which alert will be generated per campaign")
    arguments = parser.parse_args()
    # Both options default to truthy values, so these branches only fire when
    # a user explicitly passes 0.
    if not arguments.interval:
        logger.info("Usage of script:")
        # Was `parser.usage()` — `usage` is an attribute (None by default), so
        # calling it raised TypeError; print_usage() is the method.
        parser.print_usage()
        logger.warning("Going with the default value for interval.")
    if not arguments.threshold:
        logger.info("Usage of script:")
        parser.print_usage()
        logger.warning("Going with the default value for threshold.")
    return arguments
# Check if alert is already sent
def check_event(campaign):
    """Return True when *campaign* was already alerted on.

    Previously-alerted campaigns are stored one-per-line in ./Logged_event as
    Python-repr-ish dicts; single quotes are swapped for double quotes before
    JSON parsing.  A campaign matches when Subject, Recipients and the sorted
    Senders all agree.
    """
    if not os.path.isfile('./Logged_event'):
        logger.warning('Logged_event file not created')
        return False
    with open('Logged_event') as f:
        for line in f.read().splitlines():
            logged = json.loads(line.replace("'", "\""))
            if (logged['Subject'] == campaign['Subject']
                    and logged['Recipients'] == campaign['Recipients']
                    and sorted(logged['Senders']) == sorted(campaign['Senders'])):
                logger.info("Campaign %s present" % str(logged))
                return True
    return False
def email_pull_action_based_on_return_message(sender, recipients, subject, start_date, message):
    """Fallback ladder for a failed email pull: retry with progressively looser
    criteria (drop subject, then drop sender, then widen to the last 14 days).

    Each retry pulls from both Outlook and Gmail; the latest Outlook status
    message is returned.
    NOTE(review): every retry is gated on ``and sender`` even where the retry
    itself blanks the sender argument — confirm whether that gate is intended.
    """
    if 'Email not found in the' in message and sender:
        logger.info('Retrying to pull email by using only the sender, recipient and start date and skipping subject %s for recipient %s' % (subject, recipients))
        message = MicrosoftOutlookMail.email_pull(sender, recipients, "", start_date, skip_recipient_check=True)
        Gmail.remove_mails(sender, recipients, "", start_date, end_date="")
    if 'Email not found in the' in message and sender:
        logger.info('Retrying to pull email by using only the subject %s, recipient and start date and skipping sender for recipient %s' % (subject, recipients))
        message = MicrosoftOutlookMail.email_pull("", recipients, subject, start_date, skip_recipient_check=True)
        Gmail.remove_mails("", recipients, subject, start_date, end_date="")
    if 'Email not found in the' in message and sender:
        # '?utf-8' in the subject appears to indicate an encoded-word subject
        # that is unreliable to match on, so search without it — TODO confirm.
        if '?utf-8' not in subject:
            logger.info('Retrying to pull email by using only the subject %s, recipient %s since the last 14 days' % (subject, recipients))
            message = MicrosoftOutlookMail.email_pull(sender, recipients, subject, (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), skip_recipient_check=True)
            Gmail.remove_mails(sender, recipients, subject, (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), end_date="")
        else:
            logger.info('Retrying to pull email by using only recipient %s and sender %s since the last 14 days' % (recipients, sender))
            message = MicrosoftOutlookMail.email_pull(sender, recipients, "", (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), skip_recipient_check=True)
            Gmail.remove_mails(sender, recipients, "", (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), end_date="")
    return message
def email_restore_action_based_on_return_message(sender, recipients, subject, start_date, message):
    """Fallback ladder for a failed email restore (mirror of the pull ladder):
    retry with progressively looser criteria (drop subject, then drop sender,
    then widen to the last 14 days).

    Each retry restores in both Outlook and Gmail; the latest Outlook status
    message is returned.
    NOTE(review): every retry is gated on ``and sender`` even where the retry
    itself blanks the sender argument — confirm whether that gate is intended.
    """
    if 'Email not found in the' in message and sender:
        logger.info('Retrying to restore email by using only the sender, recipient and start date and skipping subject %s for recipient %s' % (subject, recipients))
        message = MicrosoftOutlookMail.email_restore(sender, recipients, "", start_date, skip_recipient_check=True)
        Gmail.restore_mails(sender, recipients, "", start_date, end_date="")
    if 'Email not found in the' in message and sender:
        logger.info('Retrying to restore email by using only the subject %s, recipient and start date and skipping sender for recipient %s' % (subject, recipients))
        message = MicrosoftOutlookMail.email_restore("", recipients, subject, start_date, skip_recipient_check=True)
        Gmail.restore_mails("", recipients, subject, start_date, end_date="")
    if 'Email not found in the' in message and sender:
        # '?utf-8' in the subject appears to indicate an encoded-word subject
        # that is unreliable to match on, so search without it — TODO confirm.
        if '?utf-8' not in subject:
            logger.info('Retrying to restore email by using only the subject %s, recipient %s since the last 14 days' % (subject, recipients))
            message = MicrosoftOutlookMail.email_restore(sender, recipients, subject, (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), skip_recipient_check=True)
            Gmail.restore_mails(sender, recipients, subject, (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), end_date="")
        else:
            logger.info('Retrying to restore email by using only recipient %s and sender %s since the last 14 days' % (recipients, sender))
            message = MicrosoftOutlookMail.email_restore(sender, recipients, "", (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), skip_recipient_check=True)
            Gmail.restore_mails(sender, recipients, "", (datetime.date.today() - datetime.timedelta(days=14)).isoformat(), end_date="")
    return message
def main():
    """Poll Proofpoint TAP for recently delivered threats, pull malicious
    mails out of recipient mailboxes (Microsoft Outlook + Gmail), restore
    confirmed false positives from Trash, and send a hubot alert when a
    campaign reaches the configured recipient threshold.
    """
    # Get arguments parsed via command line
    arguments = parse_options()
    interval = arguments.interval
    threshold = arguments.threshold
    events = proofpointTAP.get_emails(interval)
    if events is None:
        logger.info("No mail received via Proofpoint in the past %d minutes" % interval)
        exit(0)
    # Run sql query to fill in active employees and distribution list
    threats = parse_emails_per_threat_association(events)
    if not threats:
        logger.info("No mails allowed by proofpoint in the past %d minutes" % interval)
        exit(0)
    malicious_threats = []
    # Filter all malicious mails
    for threat in threats:
        if threat.malicious and not threat.false_positive:
            malicious_threats.append(threat)
    false_positive_threats = []
    for threat in threats:
        if threat.false_positive:
            false_positive_threats.append(threat)
    if not malicious_threats:
        logger.info("No mails that were allowed had any malicious content in the past %d minutes" % interval)
        exit(0)
    # BUG FIX: initialise unconditionally -- it is referenced in the alert loop
    # below, which previously raised NameError when there were no false positives.
    email_restore_messages = []
    if false_positive_threats:
        logger.info("False positives found. Restoring them from the mailbox Trash folder.")
        for threat in false_positive_threats:
            if threat.recipients:
                start_date = datetime.date.today().isoformat()
                subject = threat.subject
                recipients = threat.recipient
                sender = threat.sender
                logger.info("Restoring mail %s for recipient %s from sender %s" % (subject, recipients, sender))
                message = MicrosoftOutlookMail.email_restore(sender, recipients, subject, start_date, skip_recipient_check=True)
                Gmail.restore_mails(sender, recipients, subject, start_date, end_date="")
                if message:
                    if subject not in message:
                        message = message.replace("''", "'%s'" % subject)
                    # Keep retrying (with relaxed criteria) for the recipients the
                    # first restore could not handle.
                    while True:
                        if 'Unable to restore mail for' in message:
                            divide_message = message.split('\nUnable to restore mail for ')
                            recipients = divide_message[1]
                            message = MicrosoftOutlookMail.email_restore(sender, recipients, subject, start_date, skip_recipient_check=True)
                            Gmail.restore_mails(sender, recipients, subject, start_date, end_date="")
                            message = email_restore_action_based_on_return_message(sender, recipients, subject, start_date, message)
                        if 'Unable to restore mail for' not in message:
                            break
                email_restore_messages.append(message)
    malicious_campaigns = {}
    # Get all threats under their respective campaigns
    for threat in malicious_threats:
        campaign_name = threat.campaign_name
        # If campaign already exists in the dict, just add current Subject, Recipients, Senders to the existing ones
        if threat.recipient:
            if campaign_name in malicious_campaigns:
                if threat.subject is not None:
                    if threat.subject not in malicious_campaigns[campaign_name]['Subject']:
                        malicious_campaigns[campaign_name]['Subject'].append(threat.subject)
                malicious_campaigns[campaign_name]['Recipients'].extend(threat.recipient)
                if threat.sender is not None:
                    if threat.sender not in malicious_campaigns[campaign_name]['Senders']:
                        malicious_campaigns[campaign_name]['Senders'][threat.sender] = threat.sender_IP
                if threat.attachments:
                    for attachment in threat.attachments:
                        if attachment not in malicious_campaigns[campaign_name]['Attachments']:
                            malicious_campaigns[campaign_name]['Attachments'][attachment] = threat.attachments[attachment]
                if threat.hash_of_attachment is not None:
                    if threat.hash_of_attachment not in malicious_campaigns[campaign_name]['IOCs']:
                        malicious_campaigns[campaign_name]['IOCs'].append(threat.hash_of_attachment)
                if threat.malicious_url is not None:
                    if threat.malicious_url not in malicious_campaigns[campaign_name]['IOCs']:
                        malicious_campaigns[campaign_name]['IOCs'].append(threat.malicious_url)
            # Else Create a new dict
            else:
                malicious_campaigns[campaign_name] = {}
                malicious_campaigns[campaign_name]['Subject'] = []
                malicious_campaigns[campaign_name]['Recipients'] = []
                malicious_campaigns[campaign_name]['Senders'] = {}
                malicious_campaigns[campaign_name]['Attachments'] = {}
                malicious_campaigns[campaign_name]['IOCs'] = []
                malicious_campaigns[campaign_name]['EmailPulls'] = []
                if threat.subject is not None:
                    malicious_campaigns[campaign_name]['Subject'].append(threat.subject)
                if threat.sender is not None:
                    malicious_campaigns[campaign_name]['Senders'][threat.sender] = threat.sender_IP
                if threat.attachments:
                    malicious_campaigns[campaign_name]['Attachments'].update(threat.attachments)
                if threat.hash_of_attachment is not None:
                    malicious_campaigns[campaign_name]['IOCs'].append(threat.hash_of_attachment)
                if threat.malicious_url is not None:
                    malicious_campaigns[campaign_name]['IOCs'].append(threat.malicious_url)
                malicious_campaigns[campaign_name]['Recipients'].extend(threat.recipient)
                malicious_campaigns[campaign_name]['ThreatID'] = threat.threat_id
            # Pull mails from MicrosoftOutlookMail and Gmail
            if int((MicrosoftOutlookMail.expiry_time - datetime.datetime.now()).seconds) > 60:
                start_date = datetime.date.today().isoformat()
                subject = threat.subject
                recipients = threat.recipient
                sender = threat.sender
                logger.info("Pulling email %s for recipient %s from sender %s" % (subject, recipients, sender))
                message = MicrosoftOutlookMail.email_pull(sender, recipients, subject, start_date, skip_recipient_check=True)
                Gmail.remove_mails(sender, recipients, subject, start_date, end_date="")
                if message:
                    if subject not in message:
                        message = message.replace("''", "'%s'" % subject)
                    # Keep retrying for the recipients the first pull could not handle.
                    while True:
                        if 'Unable to delete mail for' in message:
                            divide_message = message.split('\nUnable to delete mail for ')
                            recipients = divide_message[1]
                            message = MicrosoftOutlookMail.email_pull(sender, recipients, subject, start_date, skip_recipient_check=True)
                            Gmail.remove_mails(sender, recipients, subject, start_date, end_date="")
                            message = email_pull_action_based_on_return_message(sender, recipients, subject, start_date, message)
                        if 'Unable to delete mail for' not in message:
                            break
                malicious_campaigns[campaign_name]['EmailPulls'].append(message)
    number_of_campaigns = len(malicious_campaigns)
    # Send alerts based on number of mails received per campaign in the last interval
    for index, campaign in enumerate(malicious_campaigns, start=1):
        # if there are recipients
        if malicious_campaigns[campaign]['Recipients']:
            # Remove duplicate entries
            malicious_campaigns[campaign]['Recipients'].sort()
            malicious_campaigns[campaign]['Recipients'] = list(set(malicious_campaigns[campaign]['Recipients']))
            number_of_users = len(malicious_campaigns[campaign]['Recipients'])
            malicious_campaigns[campaign]['Subject'].sort()
            malicious_campaigns[campaign]['EmailPulls'].sort()
            malicious_campaigns[campaign]['IOCs'].sort()
            malicious_campaigns[campaign]['IOCs clicked or downloaded'] = []
            # Attach the restore summary to the last campaign's report.
            if index == number_of_campaigns and email_restore_messages:
                # BUG FIX: list.extend() returns None, so the original
                # `x = x.extend(...).sort()` raised AttributeError and would have
                # clobbered the list; mutate in place instead.
                malicious_campaigns[campaign]['EmailPulls'].extend(email_restore_messages)
                malicious_campaigns[campaign]['EmailPulls'].sort()
            if number_of_users >= threshold:
                try:
                    logger.info("Sending hubort alert")
                    send_alert_via_hubot(campaign, malicious_campaigns[campaign], number_of_users)
                    with open('Logged_event', 'a+') as f:
                        f.write('%s\n' % str(malicious_campaigns[campaign]))
                except Exception as e:
                    logger.error(e)
                    # BUG FIX: logger.error(a, b) treats b as a %-format argument
                    # of a; format both values explicitly instead.
                    logger.error("%s: %s", campaign, malicious_campaigns[campaign])
# Run the monitor only when this file is executed as a script (not on import).
if __name__ == '__main__':
    main()
|
rohit-k-das/proofpoint-emailbypass-catcher | mailApp/Gmail.py | import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import mailApp.Directory as directory_api
import datetime
import json
import logging
import time
import concurrent.futures
import jwt
import re
import ConfigParser
import os
logger = logging.getLogger(__name__)
MAX_THREADS = 14  # Get max number of threads for multi-threading
# Base URL of the Gmail REST API v1 per-user endpoints.
gmail_api = 'https://www.googleapis.com/gmail/v1/users'
# Google service-account credentials, read from the 'Mail_creds' INI file
# that sits next to this module.
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),'Mail_creds'))
google_service_account_secret = Config.get('Settings', 'Google_Service_Account_Secret')
google_service_account_id = Config.get('Settings', 'Google_Service_Account_ID')
google_user_for_service_account = Config.get('Settings', 'Google_User_For_Project')
# Module-level mutable state shared across the worker threads in this module.
gmail_emails = []  # every Gmail object discovered in the current run
gmail_filtered_emails = []  # mails not yet in Trash (candidates for remove_mails)
gmail_filtered_deleted_emails = []  # mails already in Trash (candidates for restore_mails)
access_tokens = {}  # recipient -> {'access_token': ..., 'expiry': ...}
# Generate session with max of 3 retries and interval of 1 second
def session_generator():
    """Build a requests Session that retries failed connection attempts.

    Up to 3 connect retries with exponential backoff (0.5s base) are applied
    to every http:// and https:// request made through the session.
    """
    retry_policy = Retry(connect=3, backoff_factor=0.5)
    shared_adapter = HTTPAdapter(max_retries=retry_policy)
    http_session = requests.Session()
    for scheme in ('http://', 'https://'):
        http_session.mount(scheme, shared_adapter)
    return http_session
class Gmail:
    """One Gmail message belonging to ``requested_recipient``.

    Instances start empty: ``get_mail_ids`` sets ``id`` and
    ``requested_recipient`` when the message is discovered, and
    ``populate_emails`` later fills the header-derived fields.
    ``delete_mail``/``undelete_mail`` move the message to and from the
    mailbox Trash folder via the Gmail REST API.
    """
    def __init__(self):
        self.sender = None
        self.requested_recipient = None  # mailbox owner we queried on behalf of
        self.recipient = None  # recipient parsed from the 'Received' header
        self.envelope_recipient = None  # addresses parsed from the 'To' header
        self.in_deleteditems = False  # True when the message carries the TRASH label
        self.body = None
        self.ccrecipients = None
        self.bccrecipients = None
        self.message_id = None  # RFC 822 Message-ID with angle brackets stripped
        self.has_attachments = False
        self.received_date = None
        self.id = None  # Gmail API message id
        self.email_read = False
        self.subject = None
        self.header = None  # raw list of {'name': ..., 'value': ...} header dicts
    # Send mail to Trash
    def delete_mail(self):
        """Move this message to the recipient's Trash folder.

        Returns True on success, False otherwise. Retries after a 1 second
        sleep on HTTP 429, and refreshes the cached OAuth token first when it
        is about to expire.
        NOTE(review): both retry paths recurse with no depth limit -- confirm
        a persistent 429/expired-token condition cannot recurse unbounded.
        """
        status = False
        access_token = access_tokens[self.requested_recipient]['access_token']
        expiry = access_tokens[self.requested_recipient]['expiry']
        headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
        session = session_generator()
        query_start_time = time.time()
        # Check if there is more than a minute left for the access token to expire
        if (expiry - query_start_time) > 60:
            resp = session.post("%s/%s/messages/%s/trash" % (gmail_api, self.requested_recipient, self.id), headers=headers)
            if resp.ok:
                status = True
            # Rate limiting
            elif resp.status_code == 429:
                logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
                time.sleep(1)
                status = self.delete_mail()
            # Handle other http errors
            else:
                logger.error("Unable to delete mail for %s with subject %s" % (self.requested_recipient, self.subject))
                logger.error("%d:%s" % (resp.status_code, resp.text))
        # Create new access token to be used by the recipient
        else:
            access_token, expiry = generate_access_token(self.requested_recipient)
            if access_token is not None and expiry is not None:
                access_tokens[self.requested_recipient]['access_token'] = access_token
                access_tokens[self.requested_recipient]['expiry'] = expiry
                status = self.delete_mail()
        return status
    # Recover mail from Trash
    def undelete_mail(self):
        """Move this message out of the recipient's Trash folder.

        Returns True on success, False otherwise; same retry/refresh
        behaviour as delete_mail.
        """
        status = False
        access_token = access_tokens[self.requested_recipient]['access_token']
        expiry = access_tokens[self.requested_recipient]['expiry']
        headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
        session = session_generator()
        query_start_time = time.time()
        # Check if there is more than a minute left for the access token to expire
        if (expiry - query_start_time) > 60:
            resp = session.post("%s/%s/messages/%s/untrash" % (gmail_api, self.requested_recipient, self.id), headers=headers)
            if resp.ok:
                status = True
            # Rate limiting
            elif resp.status_code == 429:
                logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
                time.sleep(1)
                status = self.undelete_mail()
            else:
                logger.error("Unable to recover mail for %s with subject %s from Trash" % (self.requested_recipient, self.subject))
                logger.error("%d:%s" % (resp.status_code, resp.text))
        # Create new access token to be used by the recipient
        else:
            access_token, expiry = generate_access_token(self.requested_recipient)
            if access_token is not None and expiry is not None:
                access_tokens[self.requested_recipient]['access_token'] = access_token
                access_tokens[self.requested_recipient]['expiry'] = expiry
                status = self.undelete_mail()
        return status
# Create OAuth token per requirement for each recipient
# Create OAuth token per requirement for each recipient
def generate_access_token(recipient, need_write_access=False):
    """Mint a domain-wide-delegation OAuth2 access token for *recipient*.

    Builds a service-account JWT (RS256) impersonating the recipient and
    exchanges it at Google's token endpoint.

    :param recipient: mailbox address to impersonate
    :param need_write_access: request gmail.modify scope instead of
        gmail.readonly (needed for trash/untrash)
    :return: (access_token, expiry_epoch_seconds); both None on failure
    NOTE(review): this module imports ConfigParser (Python 2) but
    ``bytes(str, 'utf-8')`` below is Python 3 syntax -- confirm which
    interpreter this actually runs under.
    """
    access_token = None
    expiry = None
    jwt_header = {"alg": "RS256", "typ": "JWT"}
    iat = time.time()
    exp = iat + 3600  # Google caps service-account JWT lifetime at 1 hour
    jwt_claim_set = {
        'iss': google_service_account_id,
        'scope': 'https://www.googleapis.com/auth/gmail.readonly',
        'sub': recipient,
        'aud': 'https://www.googleapis.com/oauth2/v4/token',
        'iat': iat,
        'exp': exp}
    if need_write_access:
        jwt_claim_set['scope'] = 'https://www.googleapis.com/auth/gmail.modify'
    # The private key is stored with literal '\n' sequences in the INI file.
    secret = bytes(google_service_account_secret.replace('\\n', '\n'), 'utf-8')
    signed_jwt = jwt.encode(jwt_claim_set, secret, headers=jwt_header, algorithm='RS256')
    headers = {"Content-Type": "application/json; charset=utf-8"}
    data = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer', 'assertion': signed_jwt.decode('utf-8').replace("'", '"')}
    url = 'https://www.googleapis.com/oauth2/v4/token'
    session = session_generator()
    resp = session.post(url, headers=headers, data=json.dumps(data))
    if resp.ok:
        response = resp.json()
        access_token = response['access_token']
        expiry = time.time() + response['expires_in']
    elif resp.status_code == 400 and "Invalid email" in resp.json()['error_description']:
        logger.info("Recipient %s not found" % recipient)
    elif resp.status_code == 429:
        # Rate limited: wait a second and retry (recursive, unbounded).
        logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
        time.sleep(1)
        access_token, expiry = generate_access_token(recipient, need_write_access)
    else:
        logger.error('Failed to generate access token')
        logger.error("%d:%s" % (resp.status_code, resp.text))
    return access_token, expiry
# Populate the email obj with details of the mail from mail id
# Populate the email obj with details of the mail from mail id
def populate_emails(mail):
    """Fill *mail*'s fields from the Gmail API metadata of ``mail.id``.

    Fetches the message in 'metadata' format and parses the Received, From,
    To, Subject, Date, X-MS-Has-Attach and Message-ID headers. Refreshes the
    cached OAuth token when needed and retries on HTTP 429.
    """
    access_token = access_tokens[mail.requested_recipient]['access_token']
    expiry = access_tokens[mail.requested_recipient]['expiry']
    query_start_time = time.time()
    # Make the API call if token expiry time is greater than 1 minute
    if (expiry - query_start_time) > 60:
        headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
        params = {'format': 'metadata', 'metadataHeaders': ['Received', 'From', 'To', 'Subject', 'Date', 'X-MS-Has-Attach', 'Message-ID']}
        session = session_generator()
        resp = session.get("%s/%s/messages/%s" % (gmail_api, mail.requested_recipient, mail.id), headers=headers, params=params)
        if resp.ok:
            response = resp.json()
            # Fill in the parameters of the email object from the response
            mail.header = response['payload']['headers']
            if 'TRASH' in response['labelIds']:
                mail.in_deleteditems = True
            # NOTE(review): email_read defaults to False and is only ever set
            # to False here, so read mails are never marked True -- confirm intent.
            if 'UNREAD' in response['labelIds']:
                mail.email_read = False
            for section in mail.header:
                if section['name'] == 'Received' and 'for <' in section['value']:
                    mail.recipient = section['value'].split('for <')[1].split('>')[0]
                if section['name'] == 'From':
                    # NOTE(review): assumes the From header contains '<addr>';
                    # a bare address without angle brackets would raise IndexError.
                    sender = section['value'].split('<')[1].split('>')[0]
                    if mail.sender is None or mail.sender != sender:
                        mail.sender = sender
                if section['name'] == 'To':
                    mail.envelope_recipient = str(re.findall(r"\<(\S+)\>", section['value'], flags=re.I)).strip('[').strip(']')
                if section['name'] == 'Subject':
                    mail.subject = section['value']
                if section['name'] == 'Date':
                    # Drop the leading weekday ("Mon, ...") from the Date header.
                    mail.received_date = section['value'].split(',')[1]
                if section['name'] == 'X-MS-Has-Attach' and section['value'] == 'yes':
                    mail.has_attachments = True
                if section['name'] == 'Message-ID':
                    mail.message_id = section['value'].strip('<').strip('>')
        # Rate limiting
        elif resp.status_code == 429:
            logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
            time.sleep(1)
            populate_emails(mail)
        # Handle other http errors
        else:
            logger.error("Unable to get mail for %s" % mail.requested_recipient)
            logger.error("%d:%s" % (resp.status_code, resp.text))
    # Create new access token to be used by the recipient
    else:
        access_token, expiry = generate_access_token(mail.requested_recipient)
        if access_token is not None and expiry is not None:
            access_tokens[mail.requested_recipient]['access_token'] = access_token
            access_tokens[mail.requested_recipient]['expiry'] = expiry
            populate_emails(mail)
# Print the email objects
def print_all_mails_found(emails):
    """Print a one-line, pipe-separated summary of every mail in *emails*.

    Nothing is printed when the list is empty.
    """
    if not emails:
        return
    print(
        '\nIndex| Subject| Sender| Requested Recipient| Header Recipient| '
        'Envelope Recipient| Read| Received Date| ccRecipients| bccRecipients| '
        'Message ID| hasAttachment')
    row_template = "{0}| {1}| {2}| {3}| {4}| {5}| {6}| {7}| {8}| {9}| {10}| {11}"
    for position, mail in enumerate(emails, start=1):
        columns = (
            position,
            mail.subject,
            mail.sender,
            mail.requested_recipient,
            mail.recipient,
            mail.envelope_recipient,
            mail.email_read,
            mail.received_date,
            str(mail.ccrecipients),
            str(mail.bccrecipients),
            mail.message_id,
            mail.has_attachments,
        )
        print(row_template.format(*columns))
    print()
# Check input date
def check_date(start_date, end_date):
    """Abort the program if *start_date* falls after *end_date*.

    Both arguments are 'YYYY/MM/DD' strings. Exits with status 1 on an
    invalid range; returns None otherwise.
    """
    fmt = "%Y/%m/%d"
    begins = datetime.datetime.strptime(start_date, fmt)
    ends = datetime.datetime.strptime(end_date, fmt)
    if begins > ends:
        logger.critical("Start date cannot be greater than end date")
        exit(1)
# Convert recipients into a list
def format_user_input(recipients, start_date, end_date):
    """Normalise user-supplied recipients and dates.

    Dates are converted from 'YYYY-MM-DD' to 'YYYY/MM/DD'; when both are
    supplied the range is validated (check_date aborts on start > end).
    A recipients string is split on ', ', ',' or ' ' (in that priority) into
    a list; an unsupported type yields None.

    :return: (recipients_list_or_None, start_date, end_date)
    """
    if start_date:
        start_date = start_date.replace('-', '/')
    if end_date:
        end_date = end_date.replace('-', '/')
    if start_date and end_date:
        # Aborts the run when start_date > end_date.
        check_date(start_date, end_date)
    if isinstance(recipients, str):
        stripped = recipients.strip('\n')
        # Pick the first separator that appears in the raw string.
        if ', ' in recipients:
            recipients = stripped.split(', ')
        elif ',' in recipients:
            recipients = stripped.split(',')
        elif ' ' in recipients:
            recipients = stripped.split(' ')
        else:
            # Single recipient: wrap it in a list.
            recipients = [recipients]
    elif not isinstance(recipients, list):
        logger.critical("Recipients should be either a list or string. Exiting.")
        recipients = None
    return recipients, start_date, end_date
# Get mails with the filter criteria and return mail_ids corresponding to them. PageToken is used for pagination
# Get mails with the filter criteria and return mail_ids corresponding to them. PageToken is used for pagination
def get_mail_ids(recipient="", subject="", start_date="", end_date="", sender="", pagination_url="", only_has_attachments=False):
    """List message ids for *recipient* matching the filter criteria.

    Appends one Gmail object per matching message to the module-level
    ``gmail_emails`` list. When the API reply is paginated, the URL of the
    next page is returned so the caller can fetch it; '' means done.

    :param recipient: mailbox to search; may be empty only when
        *pagination_url* is given (the recipient is re-derived from the URL)
    :param subject/start_date/end_date/sender: Gmail search operators
    :param pagination_url: URL of the next result page from a previous call
    :param only_has_attachments: restrict to mails with attachments
    :return: next-page URL, or '' when there are no further pages
    """
    if not recipient:
        if pagination_url:
            recipient = pagination_url.split('users/')[1].split('/messages')[0]
        else:
            logger.error('Wrong usage of function. Exiting..')
            exit(-1)
    access_token = access_tokens[recipient]['access_token']
    expiry = access_tokens[recipient]['expiry']
    query_start_time = time.time()
    # Make the API call if token expiry time is greater than 1 minute
    if (expiry - query_start_time) > 60:
        headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
        # Create request based on whether its a pagination or not
        if not pagination_url:
            params = {'maxResults': 1000, 'includeSpamTrash': True}
            # Create Filter to search for specific mails that fit the criteria
            if subject or sender or start_date or end_date or only_has_attachments:
                filter = ''
                if subject:
                    filter = '%s subject:%s' % (filter, subject)
                if sender:
                    filter = '%s {from:%s list:%s}' % (filter, sender, sender)
                if start_date:
                    filter = '%s after:%s' % (filter, start_date)
                if end_date:
                    filter = '%s before:%s' % (filter, end_date)
                if only_has_attachments:
                    # BUG FIX: the original assigned the literal string
                    # '%s has:attachment' (missing `% filter`), which silently
                    # discarded every criterion built above.
                    filter = '%s has:attachment' % filter
                if filter:
                    # Drop the leading space accumulated by the '%s x' pattern.
                    params['q'] = filter[1:]
            session = session_generator()
            resp = session.get("%s/%s/messages" % (gmail_api, recipient), headers=headers, params=params)
        else:
            session = session_generator()
            resp = session.get(pagination_url, headers=headers)
        if resp.ok:
            response = resp.json()
            # BUG FIX: 'messages' is absent from the reply when nothing matches;
            # use .get() so an empty result does not raise KeyError below.
            messages = response.get('messages', [])
            if response['resultSizeEstimate'] != 0 and messages:
                for mail in messages:
                    mail_id = mail['id']
                    gmail_obj = Gmail()
                    gmail_obj.id = mail_id
                    gmail_obj.requested_recipient = recipient
                    gmail_emails.append(gmail_obj)
            if not pagination_url and not messages:
                logger.info("0 mails found for %s" % recipient)
            # Pagination
            if 'nextPageToken' in response:
                pageToken = response['nextPageToken']
                if 'pageToken' in resp.url:
                    pagination_url = '{0}&pageToken={1}'.format(resp.url.split('&pageToken')[0], pageToken)
                else:
                    pagination_url = '{0}&pageToken={1}'.format(resp.url, pageToken)
            else:
                pagination_url = ''
        # Rate limiting
        elif resp.status_code == 429:
            logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
            time.sleep(1)
            pagination_url = get_mail_ids(recipient, subject, start_date, end_date, sender, pagination_url, only_has_attachments)
        # Handle other http errors
        else:
            logger.error("Unable to get mail for %s" % recipient)
            logger.error("%d:%s" % (resp.status_code, resp.text))
    # Create new access token to be used by the recipient
    else:
        access_token, expiry = generate_access_token(recipient)
        if access_token is not None and expiry is not None:
            access_tokens[recipient]['access_token'] = access_token
            access_tokens[recipient]['expiry'] = expiry
            pagination_url = get_mail_ids(recipient, subject, start_date, end_date, sender, pagination_url, only_has_attachments)
    return pagination_url
# Removes duplicate entries from among the mail recipients
def remove_duplicate_email_entries(recipients):
    """Return *recipients* with duplicates removed (order is not preserved)."""
    logger.info("Removing duplicate entries from the recipient list")
    unique_recipients = set(recipients)
    return list(unique_recipients)
# Get actual list of recipients
# Get actual list of recipients
def get_users(recipients, subject):
    """Resolve the user-supplied recipient list into real mailboxes.

    Uses the Directory API to expand distribution lists and verify that each
    address exists; a single blank entry means "all active users". Returns a
    sorted, de-duplicated list (empty on failure, which callers treat as a
    signal to stop).

    :param recipients: list of addresses/DLs (possibly a single blank entry)
    :param subject: mail subject, used only for log messages
    :return: verified recipient list (possibly empty)
    """
    access_token, expiry = directory_api.generate_directory_api_access_token(google_user_for_service_account)
    query_start_time = time.time()
    # Temp code to remove microsoft domain on searched emails
    # NOTE(review): this also searches the non-onmicrosoft variant of each
    # address -- confirm this mapping is still needed.
    recipients_copy = recipients.copy()
    for recipient in recipients:
        if 'onmicrosoft' in recipient:
            recipients_copy.append(recipient.replace('onmicrosoft.', ''))
    recipients = recipients_copy
    # Make the API call if token expiry time is greater than 1 minute
    if (expiry - query_start_time) > 60:
        # Verify and generate recipient list including resolving dls. It has to be a list even if its a single recipient
        if len(recipients) == 1 and not recipients[0]:  # If recipients input is blank, get all users in Company that have a mailbox
            logger.info("Generating list of all active users")
            recipients = directory_api.list_all_active_users(access_token)
        else:
            new_recipient_list = []
            for recipient in recipients:
                recipients_from_check = directory_api.recipient_exits_check(recipient, access_token)
                if recipients_from_check:
                    new_recipient_list.extend(recipients_from_check)
            if new_recipient_list:  # Overwrite recipients variable from user input with the verified recipients
                recipients = new_recipient_list
            else:
                recipients = []
        if recipients:
            # Remove duplicate entries of recipients from user input
            recipients = remove_duplicate_email_entries(recipients)
            recipients.sort()
            logger.info("Total number of recipients entered: {0:d}".format(len(recipients)))
        else:
            logger.info("No recipients received the mail with subject {0}".format(subject))
    else:
        logger.warning('Unable to verify recipients as the access token for Directory API was not created')
        recipients = []  # Send empty recipient list to kill Gmail
    return recipients
# Generate token and check if the user exists
def user_token(recipients, write_permissions=False):
    """Mint and cache an OAuth access token for every recipient.

    Tokens (and their expiry times) are stored in the module-level
    ``access_tokens`` dict; recipients for whom no token could be created
    are simply left out of the cache.

    :param recipients: iterable of mailbox addresses
    :param write_permissions: request the gmail.modify scope (trash/untrash)
    """
    for mailbox in recipients:
        token, token_expiry = generate_access_token(mailbox, need_write_access=write_permissions)
        if token is not None and token_expiry is not None:
            access_tokens[mailbox] = {'access_token': token, 'expiry': token_expiry}
# Fetch all mails that match the criteria
def get_emails(recipients, subject, start_date, end_date, sender, get_filtered_mails=False):
global gmail_filtered_deleted_emails
global gmail_filtered_emails
# Fetch mail id for each recipient with the given set of conditions concurrently
pagination_urls = []
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(get_mail_ids, recipient, subject, start_date, end_date, sender) for recipient in
recipients]
block_of_futures = []
if len(fs) > 15:
block_of_futures = [fs[i:i+15] for i in range(0, len(fs), 15)]
else:
block_of_futures.append(fs)
for futures in block_of_futures:
if futures:
for future in concurrent.futures.as_completed(futures):
if future.result():
pagination_urls.append(future.result())
# If pagination urls are returned by the above execution, run them to fetch more mail ids until they stop returning pagination urls
while pagination_urls:
paginations = []
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(get_mail_ids, pagination_url=pagination_url) for pagination_url in pagination_urls]
block_of_futures = []
if len(fs) > 15:
block_of_futures = [fs[i:i+15] for i in range(0, len(fs), 15)]
else:
block_of_futures.append(fs)
for futures in block_of_futures:
if futures:
for future in concurrent.futures.as_completed(futures):
if future.result():
paginations.append(future.result())
pagination_urls = paginations
# If no mails were fetched, exit
if not gmail_emails:
logger.info("Email not found in the recipients mailboxes")
return
# Get mail information including the metadata from their mail id fetched from the previous execution
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
for mail in gmail_emails:
executor.submit(populate_emails, mail)
global matching_subject_mails
matching_subject_mails = [email for email in gmail_emails if email.subject == subject]
if get_filtered_mails:
# Remove emails from the list that are not deleted or are in recipient Trash; and push them to filtered_emails list
logger.info("Filtering emails that are deleted.")
gmail_filtered_deleted_emails = [mail for mail in matching_subject_mails if mail.in_deleteditems]
else:
# Remove emails from the list that are already deleted or are in recipient Trash and push them to filtered_emails list
logger.info("Filtering emails that are not deleted.")
gmail_filtered_emails = [mail for mail in matching_subject_mails if not mail.in_deleteditems]
# Restore mail from trash
# Restore mail from trash
def restore_mails(sender, recipients, subject, start_date, end_date):
    """Restore (untrash) every matching mail for every recipient.

    :param sender: sender address filter (may be empty)
    :param recipients: recipient string/list; blank means all active users
    :param subject: exact subject to match
    :param start_date/end_date: 'YYYY-MM-DD' search window (end may be empty)
    :return: human-readable status message, or None when no recipients could
        be resolved or an unexpected error occurred
    """
    # Purge global lists so that re-using the script doesn't cause conflict and display weird behavior
    global gmail_emails
    gmail_emails = []
    global gmail_filtered_deleted_emails
    gmail_filtered_deleted_emails = []
    # Format user input into lists and add time info to start and end date if necessary
    recipients, start_date, end_date = format_user_input(recipients, start_date, end_date)
    # Get actual list of recipients
    recipients = get_users(recipients, subject)
    if recipients:
        # Generate oauth token and check existence of users
        user_token(recipients, write_permissions=True)
        # Fetch all mails that match the criteria
        get_emails(recipients, subject, start_date, end_date, sender, get_filtered_mails=True)
        # Untrash every mail that is currently sitting in the Trash folder
        if gmail_filtered_deleted_emails:
            gmail_emails = []
            initial_filtered_email_recipients = []
            recipients_with_restored_mail = []
            try:
                for mail in gmail_filtered_deleted_emails:
                    initial_filtered_email_recipients.append(mail.requested_recipient)
                    if mail.undelete_mail():
                        recipients_with_restored_mail.append(mail.requested_recipient)
                logger.info("Email Restore successful. Mail is present in {0} Deleted Folder".format(str(recipients_with_restored_mail).strip('[').strip(']')))
                success_failure_message = "Email Restore successful for subject '{0}'. Mail is present in {1} Deleted Folder".format(subject, str(recipients_with_restored_mail).strip('[').strip(']'))
                # BUG FIX: list.sort() returns None, so the original comparison
                # `a.sort() != b.sort()` was always None != None == False and the
                # two branches below never executed; compare sorted copies instead
                # (sorted() also avoids mutating the lists as a side effect).
                if sorted(recipients) != sorted(initial_filtered_email_recipients):
                    recipients_with_restored_mail_from_start = []
                    for recipient in recipients:
                        if recipient not in initial_filtered_email_recipients:
                            recipients_with_restored_mail_from_start.append(recipient)
                    logger.info("Recipients {0} has already restored the mail.".format(str(recipients_with_restored_mail_from_start).strip('[').strip(']')))
                    success_failure_message = success_failure_message + ".\nRecipients {0} have already restored the mail for subject '{1}'.".format(str(recipients_with_restored_mail_from_start).strip('[').strip(']'), subject)
                if sorted(initial_filtered_email_recipients) != sorted(recipients_with_restored_mail):
                    unable_to_restore_mail_recipients = list(set(initial_filtered_email_recipients) - set(recipients_with_restored_mail))
                    logger.error('Unable to restore mail for {0}'.format(str(unable_to_restore_mail_recipients).strip('[').strip(']')))
                    success_failure_message = success_failure_message + ".\nUnable to restore mail for {0}".format(str(unable_to_restore_mail_recipients).strip('[').strip(']'))
                # BUG FIX: the status message was built but never returned
                # (the return was commented out), leaving callers with None.
                return success_failure_message
            except Exception as e:
                logger.error(e)
                logger.critical("Ran into error. Run restore script again for recipient {}".format(str(recipients).strip('[').strip(']')))
        else:
            # All the recipients have already restored the mail.
            recipients = []  # The recipient list is not required anymore, and hence is being overwritten to get the list of all recipients whose mail was restored
            for email in gmail_emails:
                recipients.append(email.requested_recipient)
            recipients = list(set(recipients))
            logger.info("Recipients {0} have already restored the mail.".format(str(recipients).strip('[').strip(']')))
            success_failure_message = "Recipients {0} have already restored the mail for subject '{1}'.".format(str(recipients).strip('[').strip(']'), subject)
            return success_failure_message
# Delete mail to trash
# Delete mail to trash
def remove_mails(sender, recipients, subject, start_date, end_date):
    """Move every matching mail to Trash for every recipient.

    :param sender: sender address filter (may be empty)
    :param recipients: recipient string/list; blank means all active users
    :param subject: exact subject to match
    :param start_date/end_date: 'YYYY-MM-DD' search window (end may be empty)
    :return: human-readable status message, or None when no recipients could
        be resolved or an unexpected error occurred
    """
    # Purge global lists so that re-using the script doesn't cause conflict and display weird behavior
    global gmail_emails
    gmail_emails = []
    global gmail_filtered_emails
    gmail_filtered_emails = []
    # Format user input into lists and add time info to start and end date if necessary
    recipients, start_date, end_date = format_user_input(recipients, start_date, end_date)
    # Get actual list of recipients
    recipients = get_users(recipients, subject)
    if recipients:
        # Generate oauth token and check existence of users
        user_token(recipients, write_permissions=True)
        # Fetch all mails that match the criteria
        get_emails(recipients, subject, start_date, end_date, sender)
        # Push every mail that is not yet deleted to the recipient's Trash
        if gmail_filtered_emails:
            gmail_emails = []
            initial_filtered_email_recipients = []
            recipients_with_deleted_mail = []
            try:
                for mail in gmail_filtered_emails:
                    initial_filtered_email_recipients.append(mail.requested_recipient)
                    if mail.delete_mail():
                        recipients_with_deleted_mail.append(mail.requested_recipient)
                logger.info("Email Pull successful. Mail is present in {0} Deleted Folder".format(str(recipients_with_deleted_mail).strip('[').strip(']')))
                success_failure_message = "Email Pull successful for subject '{0}'. Mail is present in {1} Deleted Folder".format(subject, str(recipients_with_deleted_mail).strip('[').strip(']'))
                # BUG FIX: list.sort() returns None, so the original comparison
                # `a.sort() != b.sort()` was always None != None == False and the
                # two branches below never executed; compare sorted copies instead
                # (sorted() also avoids mutating the lists as a side effect).
                if sorted(recipients) != sorted(initial_filtered_email_recipients):
                    recipients_with_deleted_mail_from_start = []
                    for recipient in recipients:
                        if recipient not in initial_filtered_email_recipients:
                            recipients_with_deleted_mail_from_start.append(recipient)
                    logger.info("Recipients {0} has already deleted the mail.".format(str(recipients_with_deleted_mail_from_start).strip('[').strip(']')))
                    success_failure_message = success_failure_message + ".\nRecipients {0} have already deleted the mail for subject '{1}'.".format(str(recipients_with_deleted_mail_from_start).strip('[').strip(']'), subject)
                if sorted(initial_filtered_email_recipients) != sorted(recipients_with_deleted_mail):
                    unable_to_delete_mail_recipients = list(set(initial_filtered_email_recipients) - set(recipients_with_deleted_mail))
                    logger.error('Unable to delete mail for {0}'.format(str(unable_to_delete_mail_recipients).strip('[').strip(']')))
                    success_failure_message = success_failure_message + ".\nUnable to delete mail for {0}".format(str(unable_to_delete_mail_recipients).strip('[').strip(']'))
                # BUG FIX: the status message was built but never returned
                # (the return was commented out), leaving callers with None.
                return success_failure_message
            except Exception as e:
                logger.error(e)
                logger.critical("Ran into error. Run pull script again for recipient {}".format(str(recipients).strip('[').strip(']')))
        else:
            # All the recipients have already deleted the mail.
            recipients = []  # The recipient list is not required anymore, and hence is being overwritten to get the list of all recipients whose mail was deleted
            for email in gmail_emails:
                recipients.append(email.requested_recipient)
            recipients = list(set(recipients))
            logger.info("Recipients {0} have already deleted the mail.".format(str(recipients).strip('[').strip(']')))
            success_failure_message = "Recipients {0} have already deleted the mail for subject '{1}'.".format(str(recipients).strip('[').strip(']'), subject)
            return success_failure_message
|
rohit-k-das/proofpoint-emailbypass-catcher | proofpointTAP/proofpointTAP.py | import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import datetime
import logging
import ConfigParser
import os
from base64 import b64encode
logger = logging.getLogger(__name__)
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),'Proofpoint_creds'))
principal = Config.get('Settings', 'Proofpoint_Service_Principal')
proofpoint_secret = Config.get('Settings', 'Proofpoint_Secret')
# Generate session with max of 3 retries and interval of 1 second
def session_generator():
    """Build a requests Session that retries failed connection attempts.

    Connection errors are retried up to 3 times with an exponential
    backoff factor of 0.5 seconds, for both HTTP and HTTPS transports.
    """
    retrying_adapter = HTTPAdapter(max_retries=Retry(connect=3, backoff_factor=0.5))
    configured_session = requests.Session()
    for scheme in ('http://', 'https://'):
        configured_session.mount(scheme, retrying_adapter)
    return configured_session
# A class to fetch email details with respect to threats
class ProofpointEmailDetail:
    """Details of a single Proofpoint TAP threat (message or click event).

    Instances start empty and are populated by the get_campaign_name_*
    methods while SIEM events are processed.
    """
    def __init__(self):
        self.campaign_name = None  # human-readable campaign name, falls back to the threat ID
        self.threat_id = None  # used to call Forensic API to IOCS
        self.malicious = False  # sandbox verdict; see is_malicious()
        self.recipient = []
        self.subject = None
        self.sender = None
        self.sender_IP = None
        self.hash_of_attachment = None  # only set for malicious ATTACHMENT threats
        self.malicious_url = None  # only set for malicious URL threats
        self.attachments = {}
        self.false_positive = False  # True when any event carried threatStatus 'falsepositive'
        self.has_attachments = False

    # Fetches the campaign name associated with a campaign ID
    def get_campaign_name(self, campaignID):
        """Resolve campaignID to a name via the TAP campaign API; logs on failure."""
        headers = {'Authorization': 'Basic %s' % b64encode(("%s:%s" % (principal, proofpoint_secret)).encode()).decode(),
                   'Content-Type': 'application/json'}
        session = session_generator()
        resp = session.get("https://tap-api-v2.proofpoint.com/v2/campaign/%s" % campaignID, headers=headers)
        if resp.status_code == 200:
            self.campaign_name = resp.json()['name']
        else:
            logger.warning('Unable to connect to the campaign API.')

    # Fetch campaign name associated with a message event
    def get_campaign_name_from_message(self, events):
        """Populate campaign/threat fields from a message's threat events.

        NOTE(review): self.malicious is first set True for campaign-tagged
        threats, then overwritten below by is_malicious() - confirm that
        ordering is intentional.
        """
        if events:
            for threat in events:
                # Filter based on not being a false positive
                if threat['threatStatus'] != 'falsepositive':
                    # Use the campaign ID to get campaign name
                    if threat['campaignID'] is not None:
                        self.get_campaign_name(threat['campaignID'])
                        self.malicious = True
                    else:
                        self.campaign_name = threat['threatID']
                    # Fetch the threat ID to be used in Forensics to get IOCS
                    if threat['threatID']:
                        self.threat_id = threat['threatID']
                        self.malicious = self.is_malicious()
                        if threat['threatType'].upper() == 'ATTACHMENT':
                            self.has_attachments = True
                        # Get hash of attached malicious document
                        if self.malicious and self.has_attachments:
                            self.hash_of_attachment = threat['threat']
                        if self.malicious and threat['threatType'].upper() == 'URL':
                            self.malicious_url = threat['threat']
                elif threat['threatStatus'] == 'falsepositive':
                    self.false_positive = True

    # Fetch campaign associated with a click event
    def get_campaign_name_from_clicks(self, event):
        """Populate campaign/threat fields from a single click event."""
        if event['campaignID'] is not None:
            self.get_campaign_name(event['campaignID'])
        else:
            self.campaign_name = event['threatID']
        self.threat_id = event['threatID']
        self.malicious = self.is_malicious()
        if self.malicious:
            self.malicious_url = event['url']

    # Use the Forensic API to confirm maliciousness of threat
    def is_malicious(self):
        """Return True if any forensic report event sandboxed as malicious.

        NOTE(review): implicitly returns None (falsy) when nothing is
        malicious or the API call fails, so self.malicious can end up
        None rather than False - callers only truth-test it.
        """
        headers = {'Authorization': 'Basic %s' % b64encode(("%s:%s" % (principal, proofpoint_secret)).encode()).decode(),
                   'Content-Type': 'application/json'}
        session = session_generator()
        resp = session.get("https://tap-api-v2.proofpoint.com/v2/forensics?threatId=%s" % self.threat_id,
                           headers=headers)
        if resp.status_code == 200:
            response = resp.json()
            for report in response['reports']:
                for event in report['forensics']:
                    # If event was malicious in the sandbox, return true
                    if event['malicious']:
                        return True
        else:
            logger.warning('Unable to connect to the forensic API.')
# Fetches emails from Proofpoint TAP from a certain time today
def get_emails(interval):
    """Fetch all SIEM events from Proofpoint TAP for the last *interval* minutes.

    Returns the decoded JSON payload on success, None for auth/other
    errors, and terminates the process on throttling (HTTP 429).
    """
    logger.info('Requesting events from Proofpoint.')
    window_start = datetime.datetime.utcnow() - datetime.timedelta(minutes=interval)
    from_time = str(window_start.isoformat(sep='T', timespec='seconds'))
    auth_token = b64encode(("%s:%s" % (principal, proofpoint_secret)).encode()).decode()
    headers = {'Authorization': 'Basic %s' % auth_token,
               'Content-Type': 'application/json'}
    resp = session_generator().get(
        "https://tap-api-v2.proofpoint.com/v2/siem/all?format=json&sinceTime=%sZ" % from_time,
        headers=headers)
    status = resp.status_code
    if status == 200:
        return resp.json()
    if status == 429:
        logger.critical('Throttle limit reached. Wait for 24 hrs.')
        exit(1)
    if status == 401:
        logger.critical('Somebody removed your credentials or TAP is down.')
        return None
    logger.critical('Proofpoint error %d:%s', status, resp.text)
    return None
|
rohit-k-das/proofpoint-emailbypass-catcher | mailApp/Directory.py | import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import concurrent.futures
import json
import logging
import time
import jwt
import ConfigParser
import os
MAX_THREADS = 14 # Get max number of threads for multi-threading
logger = logging.getLogger(__name__)
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Mail_creds'))
google_service_account_secret = Config.get('Settings', 'Google_Service_Account_Secret')
google_service_account_id = Config.get('Settings', 'Google_Service_Account_ID')
company_domain = Config.get('Settings', 'Company_Domain')
directory_api = 'https://www.googleapis.com/admin/directory/v1/{0}'
# Generate session with max of 3 retries and interval of 1 second
def session_generator():
    """Create a requests Session with basic connection-retry behaviour.

    Up to 3 connection retries with a 0.5s backoff factor are applied to
    both the http:// and https:// transports.
    """
    retry_policy = Retry(connect=3, backoff_factor=0.5)
    transport = HTTPAdapter(max_retries=retry_policy)
    fresh_session = requests.Session()
    fresh_session.mount('http://', transport)
    fresh_session.mount('https://', transport)
    return fresh_session
# Create OAuth token per requirement for each recipient
def generate_directory_api_access_token(recipient):
    """Mint a domain-delegated OAuth2 access token impersonating *recipient*.

    Builds an RS256 service-account JWT assertion (1 hour lifetime)
    scoped to read-only Directory user/group access and exchanges it at
    Google's token endpoint.

    Returns:
        (access_token, expiry): the bearer token and its absolute expiry
        time in epoch seconds, or (None, None) on failure.

    NOTE(review): signed_jwt.decode() assumes PyJWT < 2.0, where
    jwt.encode() returns bytes - confirm the pinned dependency version.
    NOTE(review): the 429 branch retries recursively with no depth bound.
    """
    access_token = None
    expiry = None
    jwt_header = {"alg": "RS256", "typ": "JWT"}
    iat = time.time()
    exp = iat + 3600  # assertion lifetime: one hour from issuance
    jwt_claim_set = {
        'iss': google_service_account_id,
        'scope': 'https://www.googleapis.com/auth/admin.directory.group.readonly https://www.googleapis.com/auth/admin.directory.user.readonly',
        'sub': recipient,  # user being impersonated (domain-wide delegation)
        'aud': 'https://www.googleapis.com/oauth2/v4/token',
        'iat': iat,
        'exp': exp
    }
    # The config file stores the PEM key with literal '\n'; restore real newlines.
    secret = bytes(google_service_account_secret.replace('\\n', '\n'), 'utf-8')
    signed_jwt = jwt.encode(jwt_claim_set, secret, headers=jwt_header, algorithm='RS256')
    headers = {"Content-Type": "application/json; charset=utf-8"}
    data = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer', 'assertion': signed_jwt.decode('utf-8').replace("'", '"')}
    url = 'https://www.googleapis.com/oauth2/v4/token'
    session = session_generator()
    resp = session.post(url, headers=headers, data=json.dumps(data))
    if resp.ok:
        response = resp.json()
        access_token = response['access_token']
        expiry = time.time() + response['expires_in']
    elif resp.status_code == 400 and "Invalid email" in resp.json()['error']['message']:
        logger.info("Recipient %s not found" % recipient)
    elif resp.status_code == 429:
        # Rate limited: back off briefly, then retry recursively.
        logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
        time.sleep(1)
        access_token, expiry = generate_directory_api_access_token(recipient)
    else:
        logger.error('Failed to generate access token')
        logger.error("%d:%s" % (resp.status_code, resp.text))
    return access_token, expiry
# Check if user/email exists
def user_check(recipient, access_token):
    """Return *recipient*'s primary email if it is a real user mailbox.

    Returns "" when the address is not a user or the lookup fails.
    Retries recursively after a 1s sleep when rate limited (HTTP 429).

    NOTE(review): the 400/403/404 branches compare exact error-message
    strings returned by the Directory API; an error payload without an
    'error' key in those statuses would raise KeyError here.
    """
    user_email = ""  # Default value
    headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
    session = session_generator()
    url = directory_api.format("users/{0}")
    resp = session.get(url.format(recipient), headers=headers)
    response = resp.json()
    if resp.ok:
        # A usable user has a '...#user' kind and a provisioned mailbox.
        if 'user' in response['kind'] and response['isMailboxSetup']:
            user_email = response['primaryEmail']
    # Handle Rate Limiting
    elif resp.status_code == 429:
        logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
        time.sleep(1)
        user_email = user_check(recipient, access_token)
    # If user doesn't exist
    elif resp.status_code == 400 and response['error']['message'] == "Type not supported: userKey":
        logger.error("%s is not a user" % recipient)
    elif resp.status_code == 403 and response['error']['message'] == 'Not Authorized to access this resource/api':
        logger.error("%s is not a user" % recipient)
    elif resp.status_code == 404 and response['error']['message'] == 'Resource Not Found: userKey':
        logger.error("%s is not a user" % recipient)
    # Handle other http errors
    else:
        logger.error("Unable to check user %s" % recipient)
        logger.error("%d:%s" % (resp.status_code, response))
    return user_email
# Check if the dl exists
def group_check(recipient, access_token):
    """Return the group's email address if *recipient* is a non-empty DL.

    Returns "" when the address is not a group, the group has zero direct
    members, or the lookup fails. Retries recursively after a 1s sleep
    when rate limited (HTTP 429).

    NOTE(review): as in user_check, the 404/403 branches match exact
    Directory API error-message strings.
    """
    dl_email = ""  # Default value of DL email
    headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
    session = session_generator()
    url = directory_api.format('groups/{0}')
    resp = session.get(url.format(recipient), headers=headers)
    response = resp.json()
    if resp.ok:
        if 'group' in response['kind']:
            # Treat empty DLs as non-existent for this workflow.
            if int(response['directMembersCount']) > 0:
                dl_email = response['email']
    # Handle Rate Limiting
    elif resp.status_code == 429:
        logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
        time.sleep(1)
        dl_email = group_check(recipient, access_token)
    # If user doesn't exist
    elif resp.status_code == 404 and response['error']['message'] == "Resource Not Found: groupKey":
        logger.error("%s is not a group" % recipient)
    elif resp.status_code == 403 and response['error']['message'] == 'Not Authorized to access this resource/api':
        logger.error("%s is not a group" % recipient)
    # Handle other http errors
    else:
        logger.error("Unable to check group %s" % recipient)
        logger.error("%d:%s" % (resp.status_code, response))
    return dl_email
# Get all members in a DL
def get_group_members(recipient, access_token, pagination_url=""):
    """Recursively collect the emails of all ACTIVE members of a group/DL.

    Nested groups are expanded in parallel worker threads (futures are
    drained in chunks of 15); result pages are followed via
    nextPageToken. HTTP 429 is retried recursively after a 1s sleep.

    Args:
        recipient: Group key/email to expand.
        access_token: Directory API bearer token.
        pagination_url: Internal - absolute URL of the next results page;
            leave empty on the initial call.

    Returns:
        List of member user emails (may contain duplicates when nested
        DLs overlap; callers de-duplicate).
    """
    recipients = []  # All individual mailboxes/users
    groups = []  # If the DL contains another DL
    headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
    session = session_generator()
    if pagination_url:
        resp = session.get(pagination_url, headers=headers)
    else:
        params = {'maxResults': 1000}
        url = directory_api.format("groups/{0}/members")
        resp = session.get(url.format(recipient), headers=headers, params=params)
    response = resp.json()
    if resp.ok:
        # The Directory API omits the 'members' key entirely for groups
        # with no members; .get() avoids the KeyError the old direct
        # indexing raised in that case.
        if response.get('members'):
            for member in response['members']:
                if member['status'] == 'ACTIVE':
                    # If the recipient is group/DL
                    if 'group' in member['kind']:
                        groups.append(member['email'])
                    # If the recipient is a user
                    if 'member' in member['kind']:
                        recipients.append(member['email'])
            # Make recursive calls if a DL contains another DL
            with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
                fs = [executor.submit(get_group_members, email, access_token) for email in groups]
                # Drain futures in blocks of 15 so a huge tree of nested
                # DLs doesn't hammer the API all at once.
                block_of_futures = []
                if len(fs) > 15:
                    block_of_futures = [fs[i:i + 15] for i in range(0, len(fs), 15)]
                else:
                    block_of_futures.append(fs)
                for futures in block_of_futures:
                    if futures:
                        for future in concurrent.futures.as_completed(futures):
                            recipients.extend(future.result())
        # Pagination
        if 'nextPageToken' in response:
            pageToken = response['nextPageToken']
            if 'pageToken' in resp.url:
                pagination_url = '{0}&pageToken={1}'.format(resp.url.split('&pageToken')[0], pageToken)
            else:
                pagination_url = '{0}&pageToken={1}'.format(resp.url, pageToken)
        else:
            pagination_url = ''
        if pagination_url:
            recipients.extend(get_group_members(recipient, access_token, pagination_url))
    # Handle Rate Limiting
    elif resp.status_code == 429:
        logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
        time.sleep(1)
        recipients.extend(get_group_members(recipient, access_token, pagination_url))
    # Handle other http errors
    else:
        logger.error("Unable to get members of group %s" % recipient)
        logger.error("%d:%s" % (resp.status_code, response))
    return recipients
# Check if employee still works in the company
def recipient_exits_check(recipient, access_token):
    """Expand *recipient* into the list of current employee mailboxes.

    A plain user resolves to itself; a distribution list resolves to its
    recursively expanded active members. Unknown addresses resolve to [].
    """
    resolved = []
    direct_user = user_check(recipient, access_token)
    if direct_user:
        resolved.append(direct_user)
        return resolved
    # Not an individual mailbox - it may be a distribution list instead.
    if not group_check(recipient, access_token):
        logger.info("{0} not a Email DL nor a user".format(recipient))
        return resolved
    members = get_group_members(recipient, access_token)
    if members:
        resolved.extend(members)
    else:
        # A DL can legitimately exist with zero resolvable members.
        logger.info("No recipients found for {0}".format(recipient))
    return resolved
# Get all users that have a mailbox
def list_all_active_users(access_token, pagination_url=""):
    """Return the primary emails of every non-suspended user with a mailbox.

    Pages through the Directory users list (500 per page, ordered by
    email) for the configured company domain. HTTP 429 is retried
    recursively after a 1s sleep.

    Args:
        access_token: Directory API bearer token.
        pagination_url: Internal - absolute URL of the next results page;
            leave empty on the initial call.
    """
    recipients = []
    headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % access_token}
    session = session_generator()
    if pagination_url:
        resp = session.get(pagination_url, headers=headers)
    else:
        params = {'maxResults': 500, 'orderBy': 'email', 'domain': company_domain, 'query': 'isMailboxSetup=True&isSuspended=False'}
        url = directory_api.format("users")
        resp = session.get(url, headers=headers, params=params)
    response = resp.json()
    if resp.ok:
        # The API omits 'users' when the query matches nothing; .get()
        # avoids the KeyError the old direct indexing raised.
        for user in response.get('users', []):
            recipients.append(user['primaryEmail'])
        # Pagination
        if 'nextPageToken' in response:
            pageToken = response['nextPageToken']
            if 'pageToken' in resp.url:
                pagination_url = '{0}&pageToken={1}'.format(resp.url.split('&pageToken')[0], pageToken)
            else:
                pagination_url = '{0}&pageToken={1}'.format(resp.url, pageToken)
        else:
            pagination_url = ''
        if pagination_url:
            recipients.extend(list_all_active_users(access_token, pagination_url))
    # Handle Rate Limiting
    elif resp.status_code == 429:
        logger.error('Too many requests. Sleeping %s' % resp.json()['error']['message'])
        time.sleep(1)
        recipients.extend(list_all_active_users(access_token, pagination_url))
    # Handle other http errors
    else:
        logger.error("Unable to get all active users")
        logger.error("%d:%s" % (resp.status_code, response))
    return recipients
|
Antidote/dawn-cmake | third_party/vulkan-deps/update-commit-message.py | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# based on an almost identical script by: <EMAIL> (<NAME>)
"""Updates the commit message used in the auto-roller.
Merges several small commit logs into a single more useful commit message.
Usage:
update_commit_message.py --old-revision=<sha1>
"""
import argparse
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
GCLIENT_LINE = r'([^:]+): ([^@]+)@(.*)'
CHANGE_TEMPLATE = '* %s: %s.git/+log/%s..%s'
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
GCLIENT = """\
solutions = [{
'name': '.',
'url': 'https://chromium.googlesource.com/vulkan-deps.git',
'deps_file': 'DEPS',
'managed': False,
}]
"""
INSERT_NEEDLE = 'If this roll has caused a breakage'
def run(cmd, args):
    """Execute *cmd* with *args* and return its stripped ASCII stdout.

    On Windows the command is invoked through its '.bat' wrapper, which
    is how depot_tools exposes its commands there.
    """
    if platform.system() == 'Windows':
        executable = '%s.bat' % cmd
    else:
        executable = cmd
    output = subprocess.check_output([executable] + list(args))
    return output.decode('ascii').strip()
def git(*args):
    """Run the git binary with *args* and return its stripped output."""
    return run('git', args)
def gclient(*args):
    """Run the gclient tool with *args* and return its stripped output."""
    return run('gclient', args)
def parse_revinfo(output):
    """Parse `gclient revinfo` output into two dictionaries.

    Returns:
        (config, urls): config maps dependency path -> pinned revision,
        urls maps dependency path -> repository URL. Lines that do not
        match the '<dep>: <url>@<rev>' shape are ignored.
    """
    pattern = re.compile(GCLIENT_LINE)
    config, urls = {}, {}
    for raw_line in output.split('\n'):
        parsed = pattern.match(raw_line.strip())
        if parsed is None:
            continue
        dep = parsed.group(1)
        urls[dep] = parsed.group(2)
        config[dep] = parsed.group(3)
    return config, urls
def _local_commit_amend(commit_msg, dry_run):
    """Insert *commit_msg* into the HEAD commit message and amend the commit.

    The text is inserted immediately before the boilerplate line that
    starts with INSERT_NEEDLE.

    Args:
        commit_msg: Text block to insert (the changed-dependencies summary).
        dry_run: When True, only log what would happen; do not amend.

    Raises:
        ValueError: if INSERT_NEEDLE is not present in the HEAD message.
    """
    logging.info('Amending changes to local commit.')
    old_commit_msg = git('log', '-1', '--pretty=%B')
    logging.debug('Existing commit message:\n%s\n', old_commit_msg)
    insert_index = old_commit_msg.rfind(INSERT_NEEDLE)
    if insert_index == -1:
        # The old code called logging.exception() here (a misuse outside an
        # except handler) and then fell through with index -1, silently
        # splicing the text into the wrong place. Fail loudly instead.
        raise ValueError('"%s" not found in commit message.' % INSERT_NEEDLE)
    new_commit_msg = old_commit_msg[:insert_index] + commit_msg + '\n\n' + old_commit_msg[insert_index:]
    logging.debug('New commit message:\n%s\n', new_commit_msg)
    if not dry_run:
        # delete=False + explicit unlink: git must be able to open the file
        # by name after we finish writing it (required on Windows).
        with tempfile.NamedTemporaryFile(delete=False, mode="w") as ntf:
            ntf.write(new_commit_msg)
        git('commit', '--amend', '--no-edit', '--file=%s' % ntf.name)
        os.unlink(ntf.name)
def main(raw_args):
    """Compare DEPS at HEAD vs --old-revision and amend the roll commit.

    Renders each changed dependency as a '+log' diff link and inserts the
    summary into the local commit message via _local_commit_amend().
    Returns a process exit code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--old-revision', help='Old git revision in the roll.', required=True)
    parser.add_argument(
        '--dry-run',
        help='Test out functionality without making changes.',
        action='store_true',
        default=False)
    parser.add_argument(
        '-v', '--verbose', help='Verbose debug logging.', action='store_true', default=False)
    args = parser.parse_args(raw_args)
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    cwd = os.getcwd()
    os.chdir(SCRIPT_DIR)
    # DEPS content as it was at the pre-roll revision.
    old_deps_content = git('show', '%s:DEPS' % args.old_revision)
    with tempfile.TemporaryDirectory() as tempdir:
        os.chdir(tempdir)
        # Add the gclientfile.
        with open(os.path.join(tempdir, '.gclient'), 'w') as gcfile:
            gcfile.write(GCLIENT)
            gcfile.close()
        # Get the current config.
        shutil.copyfile(os.path.join(SCRIPT_DIR, 'DEPS'), os.path.join(tempdir, 'DEPS'))
        gclient_head_output = gclient('revinfo')
        # Get the prior config.
        with open('DEPS', 'w') as deps:
            deps.write(old_deps_content)
            deps.close()
        gclient_old_output = gclient('revinfo')
        # Leave the temp dir before it is removed by the context manager.
        os.chdir(SCRIPT_DIR)
    head_config, urls = parse_revinfo(gclient_head_output)
    old_config, _ = parse_revinfo(gclient_old_output)
    changed_deps = []
    for dep, new_sha1 in head_config.items():
        if dep in old_config:
            old_sha1 = old_config[dep]
            if new_sha1 != old_sha1:
                dep_short = dep.replace('\\', '/').split('/')[0]
                repo = urls[dep]
                logging.debug('Found change: %s to %s' % (dep, new_sha1))
                changed_deps.append(CHANGE_TEMPLATE %
                                    (dep_short, repo, old_sha1[:10], new_sha1[:10]))
    if not changed_deps:
        print('No changed dependencies, early exit.')
        return EXIT_SUCCESS
    commit_msg = 'Changed dependencies:\n%s' % '\n'.join(sorted(changed_deps))
    os.chdir(cwd)
    _local_commit_amend(commit_msg, args.dry_run)
    return EXIT_SUCCESS
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Antidote/dawn-cmake | scripts/extract.py | # Copyright (c) 2015, Google Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Extracts archives."""
import hashlib
import optparse
import os
import os.path
import tarfile
import shutil
import sys
import zipfile
def CheckedJoin(output, path):
    """Return os.path.join(output, path) after rejecting escaping paths.

    Raises ValueError when the normalized *path* is absolute or starts
    with '.' (which covers '..' traversal). This is a sanity check only
    and shouldn't be relied on for untrusted input.
    """
    normalized = os.path.normpath(path)
    escapes_output = os.path.isabs(normalized) or normalized.startswith('.')
    if escapes_output:
        raise ValueError(path)
    return os.path.join(output, normalized)
class FileEntry(object):
    """A regular-file archive member."""
    def __init__(self, path, mode, fileobj):
        self.path = path  # archive-relative path using forward slashes
        self.mode = mode  # numeric permission bits, or None (zip entries)
        self.fileobj = fileobj  # readable file-like positioned at the data
class SymlinkEntry(object):
    """A symbolic-link archive member."""
    def __init__(self, path, mode, target):
        self.path = path  # archive-relative path of the link itself
        self.mode = mode  # permission bits, or None
        self.target = target  # link target as stored in the archive
def IterateZip(path):
    """Yield a FileEntry for every regular file inside the zip at *path*.

    Directory entries (names ending in '/') are skipped. Zip metadata
    carries no usable permission bits here, so every entry's mode is None.
    """
    with zipfile.ZipFile(path, 'r') as archive:
        for member in archive.infolist():
            is_directory = member.filename.endswith('/')
            if not is_directory:
                yield FileEntry(member.filename, None, archive.open(member))
def IterateTar(path, compression):
    """Yield SymlinkEntry/FileEntry objects for members of a tar archive.

    *compression* is the tarfile mode suffix ('gz' or 'bz2'). Directory
    members are skipped; any member that is neither a directory, symlink,
    nor regular file raises ValueError.
    """
    with tarfile.open(path, 'r:' + compression) as archive:
        for member in archive:
            if member.isdir():
                continue
            if member.issym():
                yield SymlinkEntry(member.name, None, member.linkname)
            elif member.isfile():
                yield FileEntry(member.name, member.mode,
                                archive.extractfile(member))
            else:
                raise ValueError('Unknown entry type "%s"' % (member.name, ))
def main(args):
    """Extract ARCHIVE into OUTPUT, skipping work when already up to date.

    A SHA-256 digest of the archive is kept in a stamp file inside the
    output directory; when the stamp matches, extraction is skipped.
    Unless --no-prefix is given, the archive's single top-level directory
    prefix is stripped (and its consistency enforced).

    Returns 0 on success or nothing-to-do, 1 on usage error.
    """
    parser = optparse.OptionParser(usage='Usage: %prog ARCHIVE OUTPUT')
    parser.add_option('--no-prefix',
                      dest='no_prefix',
                      action='store_true',
                      help='Do not remove a prefix from paths in the archive.')
    options, args = parser.parse_args(args)
    if len(args) != 2:
        parser.print_help()
        return 1
    archive, output = args
    if not os.path.exists(archive):
        # Skip archives that weren't downloaded.
        return 0
    # Hash the archive in binary mode. The old text-mode open() decoded
    # the bytes (UnicodeDecodeError on arbitrary binary) and hashlib
    # requires bytes in Python 3 anyway.
    with open(archive, 'rb') as f:
        sha256 = hashlib.sha256()
        while True:
            chunk = f.read(1024 * 1024)
            if not chunk:
                break
            sha256.update(chunk)
    digest = sha256.hexdigest()
    stamp_path = os.path.join(output, ".dawn_archive_digest")
    if os.path.exists(stamp_path):
        with open(stamp_path) as f:
            if f.read().strip() == digest:
                print("Already up-to-date.")
                return 0
    if archive.endswith('.zip'):
        entries = IterateZip(archive)
    elif archive.endswith('.tar.gz'):
        entries = IterateTar(archive, 'gz')
    elif archive.endswith('.tar.bz2'):
        entries = IterateTar(archive, 'bz2')
    else:
        raise ValueError(archive)
    try:
        if os.path.exists(output):
            print("Removing %s" % (output, ))
            shutil.rmtree(output)
        print("Extracting %s to %s" % (archive, output))
        prefix = None
        num_extracted = 0
        for entry in entries:
            # Even on Windows, zip files must always use forward slashes.
            if '\\' in entry.path or entry.path.startswith('/'):
                raise ValueError(entry.path)
            if not options.no_prefix:
                new_prefix, rest = entry.path.split('/', 1)
                # Ensure the archive is consistent.
                if prefix is None:
                    prefix = new_prefix
                if prefix != new_prefix:
                    raise ValueError((prefix, new_prefix))
            else:
                rest = entry.path
            # Extract the file into the output directory.
            fixed_path = CheckedJoin(output, rest)
            if not os.path.isdir(os.path.dirname(fixed_path)):
                os.makedirs(os.path.dirname(fixed_path))
            if isinstance(entry, FileEntry):
                with open(fixed_path, 'wb') as out:
                    shutil.copyfileobj(entry.fileobj, out)
            elif isinstance(entry, SymlinkEntry):
                os.symlink(entry.target, fixed_path)
            else:
                raise TypeError('unknown entry type')
            # Fix up permissions if need be.
            # TODO(davidben): To be extra tidy, this should only track the
            # execute bit as in git.
            if entry.mode is not None:
                os.chmod(fixed_path, entry.mode)
            # Print every 100 files, so bots do not time out on large archives.
            num_extracted += 1
            if num_extracted % 100 == 0:
                print("Extracted %d files..." % (num_extracted, ))
    finally:
        entries.close()
    with open(stamp_path, 'w') as f:
        f.write(digest)
    print("Done. Extracted %d files." % (num_extracted, ))
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Antidote/dawn-cmake | generator/generator_lib.py | #!/usr/bin/env python3
# Copyright 2019 The Dawn Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to create generators that render multiple Jinja2 templates for GN.
A helper module that can be used to create generator scripts (clients)
that expand one or more Jinja2 templates, with outputs usable from
GN and Ninja build-based systems. See generator_lib.gni as well.
Clients should create a Generator sub-class, then call run_generator()
with a proper derived class instance.
Clients specify a list of FileRender operations, each one of them will
output a file into a temporary output directory through Jinja2 expansion.
All temporary output files are then grouped and written into a single JSON
file, that acts as a convenient single GN output target. Use extract_json.py
to extract the output files from the JSON tarball in another GN action.
--depfile can be used to specify an output Ninja dependency file for the
JSON tarball, to ensure it is regenerated any time one of its dependencies
changes.
Finally, --expected-output-files can be used to check the list of generated
output files.
"""
import argparse, json, os, re, sys
from collections import namedtuple
# A FileRender represents a single Jinja2 template render operation:
#
# template: Jinja2 template name, relative to --template-dir path.
#
# output: Output file path, relative to temporary output directory.
#
# params_dicts: iterable of (name:string -> value:string) dictionaries.
# All of them will be merged before being sent as Jinja2 template
# expansion parameters.
#
# Example:
# FileRender('api.c', 'src/project_api.c', [{'PROJECT_VERSION': '1.0.0'}])
#
FileRender = namedtuple('FileRender', ['template', 'output', 'params_dicts'])
# The interface that must be implemented by generators.
class Generator:
    """Base class for generator clients; subclass and pass to run_generator().

    Subclasses override any of the hooks below; the defaults describe a
    generator with no description, no extra flags, and nothing to render.
    """
    def get_description(self):
        """Return generator description for --help."""
        return ""

    def add_commandline_arguments(self, parser):
        """Add generator-specific argparse arguments."""
        pass

    def get_file_renders(self, args):
        """Return the list of FileRender objects to process."""
        return []

    def get_dependencies(self, args):
        """Return a list of extra input dependencies."""
        return []
# Allow custom Jinja2 installation path through an additional python
# path from the arguments if present. This isn't done through the regular
# argparse because PreprocessingLoader uses jinja2 in the global scope before
# "main" gets to run.
#
# NOTE: If this argument appears several times, this only uses the first
# value, while argparse would typically keep the last one!
kJinja2Path = '--jinja2-path'
try:
jinja2_path_argv_index = sys.argv.index(kJinja2Path)
# Add parent path for the import to succeed.
path = os.path.join(sys.argv[jinja2_path_argv_index + 1], os.pardir)
sys.path.insert(1, path)
except ValueError:
# --jinja2-path isn't passed, ignore the exception and just import Jinja2
# assuming it already is in the Python PATH.
pass
import jinja2
# A custom Jinja2 template loader that removes the extra indentation
# of the template blocks so that the output is correctly indented
class _PreprocessingLoader(jinja2.BaseLoader):
    """Jinja2 loader that strips template control-block indentation.

    Templates indent their {% ... %} control blocks for readability;
    rendering them verbatim would mis-indent the generated output. This
    loader tracks block nesting depth, removes one level of indentation
    (4 spaces or one tab) per depth from each line, and drops lines that
    are pure '//*' comments.
    """

    def __init__(self, path):
        self.path = path

    def get_source(self, environment, template):
        """Load, preprocess, and return (source, path, uptodate) for Jinja2."""
        path = os.path.join(self.path, template)
        if not os.path.exists(path):
            raise jinja2.TemplateNotFound(template)
        mtime = os.path.getmtime(path)
        with open(path) as f:
            source = self.preprocess(f.read())
        # The lambda lets Jinja2 check template freshness for auto-reload.
        return source, path, lambda: mtime == os.path.getmtime(path)

    # Raw strings: '\s' inside a normal string literal is an invalid
    # escape sequence (DeprecationWarning since Python 3.6, slated to be
    # a SyntaxError); the compiled patterns are unchanged.
    blockstart = re.compile(r'{%-?\s*(if|elif|else|for|block|macro)[^}]*%}')
    blockend = re.compile(r'{%-?\s*(end(if|for|block|macro)|elif|else)[^}]*%}')

    def preprocess(self, source):
        """Return *source* with control-flow indentation and '//*' lines removed."""
        lines = source.split('\n')
        # Compute the current indentation level of the template blocks and
        # remove their indentation
        result = []
        indentation_level = 0
        # Filter lines that are pure comments. line_comment_prefix is not
        # enough because it removes the comment but doesn't completely remove
        # the line, resulting in more verbose output.
        lines = filter(lambda line: not line.strip().startswith('//*'), lines)
        # Remove indentation templates have for the Jinja control flow.
        for line in lines:
            # The capture in the regex adds one element per block start or end,
            # so we divide by two. There is also an extra line chunk
            # corresponding to the line end, so we subtract it.
            numends = (len(self.blockend.split(line)) - 1) // 2
            indentation_level -= numends
            result.append(self.remove_indentation(line, indentation_level))
            numstarts = (len(self.blockstart.split(line)) - 1) // 2
            indentation_level += numstarts
        return '\n'.join(result) + '\n'

    def remove_indentation(self, line, n):
        """Strip *n* levels of leading indentation (4 spaces or one tab)."""
        for _ in range(n):
            if line.startswith('    '):
                line = line[4:]
            elif line.startswith('\t'):
                line = line[1:]
            else:
                # Under-indented lines must be blank, or the template is
                # malformed.
                assert line.strip() == ''
        return line
_FileOutput = namedtuple('FileOutput', ['name', 'content'])
def _do_renders(renders, template_dir):
    """Expand each FileRender through Jinja2; return a list of _FileOutput.

    Every template sees a small set of Python builtins (enumerate,
    format, len) plus debug/assert helpers, merged with the render's own
    parameter dicts (later dicts win on key collisions).
    """
    env = jinja2.Environment(
        loader=_PreprocessingLoader(template_dir),
        lstrip_blocks=True,
        trim_blocks=True,
        line_comment_prefix='//*')

    def do_assert(expr):
        # Jinja expressions can't use the `assert` statement; expose it
        # as a callable that renders to the empty string.
        assert expr
        return ''

    def debug(text):
        print(text)

    base_params = {
        'enumerate': enumerate,
        'format': format,
        'len': len,
        'debug': debug,
        'assert': do_assert,
    }
    rendered = []
    for render in renders:
        params = dict(base_params)
        for extra in render.params_dicts:
            params.update(extra)
        template = env.get_template(render.template)
        rendered.append(_FileOutput(render.output, template.render(**params)))
    return rendered
# Compute the list of imported, non-system Python modules.
# It assumes that any path outside of the root directory is system.
def _compute_python_dependencies(root_dir=None):
if not root_dir:
# Assume this script is under generator/ by default.
root_dir = os.path.join(os.path.dirname(__file__), os.pardir)
root_dir = os.path.abspath(root_dir)
module_paths = (module.__file__ for module in sys.modules.values()
if module and hasattr(module, '__file__'))
paths = set()
for path in module_paths:
# Builtin/namespaced modules may return None for the file path.
if not path:
continue
path = os.path.abspath(path)
if not path.startswith(root_dir):
continue
if (path.endswith('.pyc')
or (path.endswith('c') and not os.path.splitext(path)[1])):
path = path[:-1]
paths.add(path)
return paths
def run_generator(generator):
    """Parse command-line flags and drive *generator*'s template renders.

    Depending on the flags this prints dependency/output lists (CMake
    mode), writes a Ninja depfile, verifies the expected output list,
    and/or renders the templates into --output-dir and the JSON tarball.
    Returns a process exit code (0 success, 1 failure, None when no
    explicit return is hit).
    """
    parser = argparse.ArgumentParser(
        description=generator.get_description(),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    generator.add_commandline_arguments(parser)
    parser.add_argument('--template-dir',
                        default='templates',
                        type=str,
                        help='Directory with template files.')
    parser.add_argument(
        kJinja2Path,
        default=None,
        type=str,
        help='Additional python path to set before loading Jinja2')
    parser.add_argument(
        '--output-json-tarball',
        default=None,
        type=str,
        help=('Name of the "JSON tarball" to create (tar is too annoying '
              'to use in python).'))
    parser.add_argument(
        '--depfile',
        default=None,
        type=str,
        help='Name of the Ninja depfile to create for the JSON tarball')
    parser.add_argument(
        '--expected-outputs-file',
        default=None,
        type=str,
        help="File to compare outputs with and fail if it doesn't match")
    parser.add_argument(
        '--root-dir',
        default=None,
        type=str,
        help=('Optional source root directory for Python dependency '
              'computations'))
    parser.add_argument(
        '--allowed-output-dirs-file',
        default=None,
        type=str,
        help=("File containing a list of allowed directories where files "
              "can be output."))
    parser.add_argument(
        '--print-cmake-dependencies',
        default=False,
        action="store_true",
        help=("Prints a semi-colon separated list of dependencies to "
              "stdout and exits."))
    parser.add_argument(
        '--print-cmake-outputs',
        default=False,
        action="store_true",
        help=("Prints a semi-colon separated list of outputs to "
              "stdout and exits."))
    parser.add_argument('--output-dir',
                        default=None,
                        type=str,
                        help='Directory where to output generate files.')
    args = parser.parse_args()
    renders = generator.get_file_renders(args)
    # Output a list of all dependencies for CMake or the tarball for GN/Ninja.
    if args.depfile != None or args.print_cmake_dependencies:
        dependencies = generator.get_dependencies(args)
        dependencies += [
            args.template_dir + os.path.sep + render.template
            for render in renders
        ]
        dependencies += _compute_python_dependencies(args.root_dir)
        if args.depfile != None:
            with open(args.depfile, 'w') as f:
                f.write(args.output_json_tarball + ": " +
                        " ".join(dependencies))
        if args.print_cmake_dependencies:
            sys.stdout.write(";".join(dependencies))
            return 0
    # The caller wants to assert that the outputs are what it expects.
    # Load the file and compare with our renders.
    if args.expected_outputs_file != None:
        with open(args.expected_outputs_file) as f:
            expected = set([line.strip() for line in f.readlines()])
        actual = {render.output for render in renders}
        if actual != expected:
            print("Wrong expected outputs, caller expected:\n " +
                  repr(sorted(expected)))
            print("Actual output:\n " + repr(sorted(actual)))
            return 1
    # Print the list of all the outputs for cmake.
    if args.print_cmake_outputs:
        sys.stdout.write(";".join([
            os.path.join(args.output_dir, render.output) for render in renders
        ]))
        return 0
    outputs = _do_renders(renders, args.template_dir)
    # The caller wants to assert that the outputs are only in specific
    # directories.
    if args.allowed_output_dirs_file != None:
        with open(args.allowed_output_dirs_file) as f:
            allowed_dirs = set([line.strip() for line in f.readlines()])
        for directory in allowed_dirs:
            if not directory.endswith('/'):
                print('Allowed directory entry "{}" doesn\'t '
                      'end with /'.format(directory))
                return 1

        def check_in_subdirectory(path, directory):
            # True when path is directly inside directory (no deeper nesting).
            return path.startswith(
                directory) and not '/' in path[len(directory):]

        for render in renders:
            if not any(
                    check_in_subdirectory(render.output, directory)
                    for directory in allowed_dirs):
                print('Output file "{}" is not in the allowed directory '
                      'list below:'.format(render.output))
                for directory in sorted(allowed_dirs):
                    print(' "{}"'.format(directory))
                return 1
    # Output the JSON tarball
    if args.output_json_tarball != None:
        json_root = {}
        for output in outputs:
            json_root[output.name] = output.content
        with open(args.output_json_tarball, 'w') as f:
            f.write(json.dumps(json_root))
    # Output the files directly.
    if args.output_dir != None:
        for output in outputs:
            output_path = os.path.join(args.output_dir, output.name)
            directory = os.path.dirname(output_path)
            if not os.path.exists(directory):
                os.makedirs(directory)
            with open(output_path, 'w') as outfile:
                outfile.write(output.content)
|
Antidote/dawn-cmake | third_party/tint/fuzzers/generate_wgsl_corpus.py | <filename>third_party/tint/fuzzers/generate_wgsl_corpus.py
#!/usr/bin/env python3
# Copyright 2021 The Tint Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Collect all .wgsl files under a given directory and copy them to a given
# corpus directory, flattening their file names by replacing path
# separators with underscores. If the output directory already exists, it
# will be deleted and re-created. Files ending with ".expected.wgsl" are
# skipped.
#
# The intended use of this script is to generate a corpus of WGSL shaders
# for fuzzing.
#
# Usage:
# generate_wgsl_corpus.py <input_dir> <corpus_dir>
import os
import pathlib
import shutil
import sys
def list_wgsl_files(root_search_dir):
    """Recursively yield the path of every file under *root_search_dir*
    whose name ends in ".wgsl".

    Only regular files are yielded.  The previous version iterated
    ``folders + files``, so a *directory* whose name happened to end in
    ".wgsl" was also yielded and would crash the caller's shutil.copy().

    :param root_search_dir: directory tree to search.
    :return: generator of joined root/filename paths.
    """
    for root, _folders, files in os.walk(root_search_dir):
        for filename in files:
            if pathlib.Path(filename).suffix == '.wgsl':
                yield os.path.join(root, filename)
def main():
    """Copy every .wgsl file found under argv[1] into the corpus
    directory argv[2], flattening path separators into underscores.

    Files ending in ".expected.wgsl" are skipped.  The corpus directory
    is recreated from scratch on every run.  Returns a shell exit status.
    """
    if len(sys.argv) != 3:
        print("Usage: " + sys.argv[0] + " <input dir> <output dir>")
        return 1
    source_root: str = os.path.abspath(sys.argv[1].rstrip(os.sep))
    target_dir: str = os.path.abspath(sys.argv[2])
    # Always start from an empty corpus directory.
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    os.makedirs(target_dir)
    prefix_len = len(source_root) + 1
    for shader in list_wgsl_files(source_root):
        # Expected-output files are test oracles, not fuzzing inputs.
        if shader.endswith(".expected.wgsl"):
            continue
        flattened = shader[prefix_len:].replace(os.sep, '_')
        shutil.copy(shader, target_dir + os.sep + flattened)


if __name__ == "__main__":
    sys.exit(main())
|
uk-gov-mirror/metomi.fab | source/fab/builder.py | ##############################################################################
# (c) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file COPYRIGHT
# which you should have received as part of this distribution
##############################################################################
import logging
from pathlib import Path
from fab.database import SqliteStateDatabase, FileInfoDatabase
from fab.artifact import \
Artifact, \
FortranSource, \
CSource, \
CHeader, \
BinaryObject, \
Seen, \
HeadersAnalysed, \
Modified, \
Raw, \
Analysed, \
Compiled
from fab.tasks.common import Linker, HeaderAnalyser
from fab.tasks.fortran import \
FortranWorkingState, \
FortranPreProcessor, \
FortranAnalyser, \
FortranCompiler
from fab.tasks.c import \
CWorkingState, \
CPragmaInjector, \
CPreProcessor, \
CAnalyser, \
CCompiler
from fab.source_tree import \
TreeDescent, \
SourceVisitor
from fab.queue import QueueManager
from fab.engine import Engine, PathMap
def entry() -> None:
    """
    Entry point for the Fab build tool.

    Parses the command line, reads the configuration file and hands both
    over to a :class:`Fab` instance which performs the build.
    """
    import argparse
    import configparser
    import multiprocessing
    import sys
    import fab

    # Commentary goes to stderr so stdout remains usable for reports.
    logger = logging.getLogger('fab')
    logger.addHandler(logging.StreamHandler(sys.stderr))

    description = 'Flexible build system for scientific software.'

    parser = argparse.ArgumentParser(add_help=False,
                                     description=description)
    # We add our own help so as to capture as many permutations of how people
    # might ask for help. The default only looks for a subset.
    parser.add_argument('-h', '-help', '--help', action='help',
                        help='Print this help and exit')
    parser.add_argument('-V', '--version', action='version',
                        version=fab.__version__,
                        help='Print version identifier and exit')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Produce a running commentary on progress')
    parser.add_argument('-w', '--workspace', metavar='PATH', type=Path,
                        default=Path.cwd() / 'working',
                        help='Directory for working files.')
    # range() excludes its upper bound, so "+ 1" is needed to make the
    # machine's full CPU count selectable.  Without it, a 2-CPU machine had
    # an *empty* choices list and even "--nprocs 2" was rejected.
    parser.add_argument('--nprocs', action='store', type=int, default=2,
                        choices=range(2, multiprocessing.cpu_count() + 1),
                        help='Provide number of processors available for use,'
                             'default is 2 if not set.')
    parser.add_argument('source', type=Path,
                        help='The path of the source tree to build')
    parser.add_argument('conf_file', type=Path, default='config.ini',
                        help='The path of the configuration file')
    arguments = parser.parse_args()

    if arguments.verbose:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    config = configparser.ConfigParser(allow_no_value=True)
    configfile = arguments.conf_file
    config.read(configfile)
    settings = config['settings']
    flags = config['flags']

    # If not provided, name the exec after the target
    if settings['exec-name'] == '':
        settings['exec-name'] = settings['target']

    application = Fab(arguments.workspace,
                      settings['target'],
                      settings['exec-name'],
                      flags['fpp-flags'],
                      flags['fc-flags'],
                      flags['ld-flags'],
                      arguments.nprocs)
    application.run(arguments.source)
class Fab(object):
    """
    Top-level build driver: wires path maps, tasks and the task map into
    an :class:`Engine`, runs the build through a process queue, then
    prints a report of the resulting state database.
    """

    def __init__(self,
                 workspace: Path,
                 target: str,
                 exec_name: str,
                 fpp_flags: str,
                 fc_flags: str,
                 ld_flags: str,
                 n_procs: int):
        """
        :param workspace: Directory for intermediate and output files
                          (created if it does not exist).
        :param target: Name of the thing being built (passed to the engine).
        :param exec_name: File name of the linked executable.
        :param fpp_flags: Space-separated Fortran preprocessor flags.
        :param fc_flags: Space-separated Fortran compiler flags.
        :param ld_flags: Space-separated linker flags.
        :param n_procs: Processes available; one is reserved for the
                        queue manager (workers = n_procs - 1).
        """
        self._workspace = workspace
        if not workspace.exists():
            workspace.mkdir(parents=True)
        self._state = SqliteStateDatabase(workspace)

        # Path maps tell the engine what filetype and starting state
        # the Artifacts representing any files encountered by the
        # initial descent should have
        #
        # NOTE: lower-case .f90 starts in the Raw state and, per the task
        # map below, skips preprocessing; upper-case .F90 starts Seen and
        # is preprocessed first.
        path_maps = [
            PathMap(r'.*\.f90', FortranSource, Raw),
            PathMap(r'.*\.F90', FortranSource, Seen),
            PathMap(r'.*\.c', CSource, Seen),
            PathMap(r'.*\.h', CHeader, Seen),
        ]

        # Initialise the required Tasks, providing them with any static
        # properties such as flags to use, workspace location etc
        # TODO: Eventually the tasks may instead access many of these
        # properties via the configuration (at Task runtime, to allow for
        # file-specific overrides?)
        fortran_preprocessor = FortranPreProcessor(
            'cpp', ['-traditional-cpp', '-P'] + fpp_flags.split(), workspace
        )
        fortran_analyser = FortranAnalyser(workspace)
        # -J places compiled .mod files in the workspace.
        fortran_compiler = FortranCompiler(
            'gfortran',
            ['-c', '-J', str(workspace)] + fc_flags.split(), workspace
        )

        header_analyser = HeaderAnalyser(workspace)
        c_pragma_injector = CPragmaInjector(workspace)
        c_preprocessor = CPreProcessor(
            'cpp', [], workspace
        )
        c_analyser = CAnalyser(workspace)
        c_compiler = CCompiler(
            'gcc', ['-c'], workspace
        )

        linker = Linker(
            'gcc', ['-lc', '-lgfortran'] + ld_flags.split(),
            workspace, exec_name
        )

        # The Task map tells the engine what Task it should be using
        # to deal with Artifacts depending on their type and state
        task_map = {
            (FortranSource, Seen): fortran_preprocessor,
            (FortranSource, Raw): fortran_analyser,
            (FortranSource, Analysed): fortran_compiler,
            (CSource, Seen): header_analyser,
            (CHeader, Seen): header_analyser,
            (CSource, HeadersAnalysed): c_pragma_injector,
            (CHeader, HeadersAnalysed): c_pragma_injector,
            (CSource, Modified): c_preprocessor,
            (CSource, Raw): c_analyser,
            (CSource, Analysed): c_compiler,
            (BinaryObject, Compiled): linker,
        }

        engine = Engine(workspace,
                        target,
                        path_maps,
                        task_map)
        self._queue = QueueManager(n_procs - 1, engine)

    def _extend_queue(self, artifact: Artifact) -> None:
        # Callback handed to the source-tree visitor: feed each discovered
        # artifact into the work queue.
        self._queue.add_to_queue(artifact)

    def run(self, source: Path):
        """
        Build the given source tree, then print the file, Fortran and C
        views of the state database to stdout.

        :param source: Root of the source tree to build.
        """
        self._queue.run()

        visitor = SourceVisitor(self._extend_queue)
        descender = TreeDescent(source)
        descender.descend(visitor)

        self._queue.check_queue_done()
        self._queue.shutdown()

        file_db = FileInfoDatabase(self._state)
        for file_info in file_db:
            print(file_info.filename)
            # Where files are generated in the working directory
            # by third party tools, we cannot guarantee the hashes
            if file_info.filename.match(f'{self._workspace}/*'):
                print(' hash: --hidden-- (generated file)')
            else:
                print(f' hash: {file_info.adler32}')

        fortran_db = FortranWorkingState(self._state)
        for fortran_info in fortran_db:
            print(fortran_info.unit.name)
            print(' found in: ' + str(fortran_info.unit.found_in))
            print(' depends on: ' + str(fortran_info.depends_on))

        c_db = CWorkingState(self._state)
        for c_info in c_db:
            print(c_info.symbol.name)
            print(' found_in: ' + str(c_info.symbol.found_in))
            print(' depends on: ' + str(c_info.depends_on))
|
uk-gov-mirror/metomi.fab | source/fab/dumper.py | <reponame>uk-gov-mirror/metomi.fab
##############################################################################
# (c) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file COPYRIGHT
# which you should have received as part of this distribution
##############################################################################
"""
Core of the database dump application.
"""
import logging
from pathlib import Path
import sys
from fab.database import FileInfoDatabase, SqliteStateDatabase
from fab.tasks.fortran import FortranWorkingState
from fab.tasks.c import CWorkingState
def entry() -> None:
    """
    Entry point for the Fab state database dump tool.
    """
    import argparse
    import fab

    # Commentary goes to stderr so stdout stays clean for the dump itself.
    logger = logging.getLogger('fab')
    logger.addHandler(logging.StreamHandler(sys.stderr))

    description = 'Flexible build system for scientific software.'
    cli = argparse.ArgumentParser(add_help=False,
                                  description=description)
    # We add our own help so as to capture as many permutations of how people
    # might ask for help. The default only looks for a subset.
    cli.add_argument('-h', '-help', '--help', action='help',
                     help='Print this help and exit')
    cli.add_argument('-V', '--version', action='version',
                     version=fab.__version__,
                     help='Print version identifier and exit')
    cli.add_argument('-v', '--verbose', action='store_true',
                     help='Produce a running commentary on progress')
    cli.add_argument('-w', '--workspace', metavar='PATH', type=Path,
                     default=Path.cwd() / 'working',
                     help='Directory for working files.')
    arguments = cli.parse_args()

    logger.setLevel(logging.INFO if arguments.verbose else logging.WARNING)

    Dump(arguments.workspace).run()
class Dump(object):
    """
    Writes a human-readable rendition of the Fab state database to a
    text stream.
    """

    def __init__(self, workspace: Path):
        """
        :param workspace: Directory holding the Fab state database.
        """
        self._workspace = workspace
        self._state = SqliteStateDatabase(workspace)

    def run(self, stream=sys.stdout):
        """
        Dump the file, Fortran and C views of the database.

        :param stream: Writable text stream receiving the report
                       (defaults to stdout).
        """
        file_view = FileInfoDatabase(self._state)
        print("File View", file=stream)
        for file_info in file_view:
            print(f" File : {file_info.filename}", file=stream)
            # Where files are generated in the working directory
            # by third party tools, we cannot guarantee the hashes
            #
            # Fix: this print previously went to stdout unconditionally,
            # ignoring the caller-supplied stream.
            if file_info.filename.match(f'{self._workspace}/*'):
                print(' Hash : --hidden-- (generated file)', file=stream)
            else:
                print(f" Hash : {file_info.adler32}", file=stream)

        fortran_view = FortranWorkingState(self._state)
        header = False
        for info in fortran_view:
            # Only print the section header if the view has content.
            if not header:
                print("Fortran View", file=stream)
                header = True
            print(f" Program unit : {info.unit.name}", file=stream)
            print(f" Found in : {info.unit.found_in}", file=stream)
            print(f" Prerequisites : {', '.join(info.depends_on)}",
                  file=stream)

        c_view = CWorkingState(self._state)
        header = False
        for info in c_view:
            if not header:
                print("C View", file=stream)
                header = True
            print(f" Symbol : {info.symbol.name}", file=stream)
            print(f" Found in : {info.symbol.found_in}", file=stream)
            print(f" Prerequisites : {', '.join(info.depends_on)}",
                  file=stream)
|
uk-gov-mirror/metomi.fab | source/fab/tasks/common.py | <reponame>uk-gov-mirror/metomi.fab<filename>source/fab/tasks/common.py
# (c) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file COPYRIGHT
# which you should have received as part of this distribution
import re
import subprocess
from typing import List, Optional, Match
from pathlib import Path
from fab.artifact import \
Artifact, \
Executable, \
HeadersAnalysed, \
Linked
from fab.tasks import Task, TaskException
from fab.reader import FileTextReader
class Linker(Task):
    """
    Combines object files into a single executable using an external
    linker command.
    """

    def __init__(self,
                 linker: str,
                 flags: List[str],
                 workspace: Path,
                 output_filename: str):
        """
        :param linker: Name of the linker executable.
        :param flags: Extra flags appended after the object files.
        :param workspace: Directory in which the executable is created.
        :param output_filename: Name of the executable within workspace.
        """
        self._linker = linker
        self._flags = flags
        self._workspace = workspace
        self._output_filename = output_filename

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """Link every artifact's file; returns one Linked executable."""
        if len(artifacts) < 1:
            msg = ('Linker expects at least one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        output_file = self._workspace / self._output_filename
        # Command shape: <linker> -o <output> <objects...> <flags...>
        command = [self._linker, '-o', str(output_file)]
        command += [str(artifact.location) for artifact in artifacts]
        command += self._flags

        subprocess.run(command, check=True)

        return [Artifact(output_file, Executable, Linked)]
class HeaderAnalyser(Task):
    """
    Scans a C source or header file for #include directives and records
    quoted (user) includes as workspace-relative dependencies.
    """

    _include_re = r'^\s*#include\s+(\S+)'
    _include_pattern = re.compile(_include_re)

    def __init__(self, workspace: Path):
        """
        :param workspace: Directory against which user includes resolve.
        """
        self._workspace = workspace

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """Analyse exactly one artifact; returns it as HeadersAnalysed."""
        if len(artifacts) != 1:
            msg = ('Header Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)
        artifact = artifacts[0]

        new_artifact = Artifact(artifact.location,
                                artifact.filetype,
                                HeadersAnalysed)
        reader = FileTextReader(artifact.location)
        for line in reader.line_by_line():
            match: Optional[Match] = self._include_pattern.match(line)
            if match is None:
                continue
            include: str = match.group(1)
            # Only quoted includes become dependencies; angle-bracket
            # (system) includes are ignored.
            if include.startswith(('"', "'")):
                include = include.strip('"').strip("'")
                new_artifact.add_dependency(
                    Path(self._workspace / include))
        return [new_artifact]
|
uk-gov-mirror/metomi.fab | source/fab/tasks/c.py | # (c) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file COPYRIGHT
# which you should have received as part of this distribution
"""
C language handling classes.
"""
import subprocess
import re
import clang.cindex # type: ignore
from collections import deque
from typing import \
List, \
Iterator, \
Pattern, \
Optional, \
Match, \
Sequence, \
Generator, \
Union
from pathlib import Path
from fab.database import \
StateDatabase, \
DatabaseDecorator, \
FileInfoDatabase, \
SqliteStateDatabase, \
WorkingStateException
from fab.tasks import Task, TaskException
from fab.artifact import \
Artifact, \
Raw, \
Modified, \
Analysed, \
Compiled, \
BinaryObject
from fab.reader import \
TextReader, \
FileTextReader, \
TextReaderDecorator
class CSymbolUnresolvedID(object):
    """A C symbol known only by name, not yet pinned to a source file."""

    def __init__(self, name: str):
        self.name = name

    def __eq__(self, other):
        # Comparison with unrelated types is a programming error here,
        # so raise rather than returning NotImplemented.
        if not isinstance(other, CSymbolUnresolvedID):
            raise TypeError(
                "Cannot compare CSymbolUnresolvedID with "
                + other.__class__.__name__)
        return self.name == other.name
class CSymbolID(CSymbolUnresolvedID):
    """A C symbol identified by name plus the file it was found in."""

    def __init__(self, name: str, found_in: Path):
        super().__init__(name)
        self.found_in = found_in

    def __hash__(self):
        # Same combination as ever: the sum of the component hashes.
        return hash(self.name) + hash(self.found_in)

    def __eq__(self, other):
        if not isinstance(other, CSymbolID):
            raise TypeError(
                "Cannot compare CSymbolID with "
                + other.__class__.__name__)
        return super().__eq__(other) and self.found_in == other.found_in
class CInfo(object):
    """Dependency record for one C symbol: its identity and prerequisites."""

    def __init__(self,
                 symbol: CSymbolID,
                 depends_on: Sequence[str] = ()):
        self.symbol = symbol
        # Copy into a list so callers' sequences are never mutated.
        self.depends_on = list(depends_on)

    def __str__(self):
        return f"C symbol '{self.symbol.name}' " \
            f"from '{self.symbol.found_in}' depending on: " \
            f"{', '.join(self.depends_on)}"

    def __eq__(self, other):
        if not isinstance(other, CInfo):
            raise TypeError(
                "Cannot compare C Info with "
                + other.__class__.__name__)
        return self.symbol == other.symbol \
            and self.depends_on == other.depends_on

    def add_prerequisite(self, prereq: str):
        """Append a prerequisite symbol name, preserving order."""
        self.depends_on.append(prereq)
class CWorkingState(DatabaseDecorator):
    """
    Maintains a database of information relating to C symbols.
    """
    # According to the C standard, section 5.2.4.1,
    # (C11) ISO/IEC 9899, the maximum length of an
    # external identifier is 31 characters.
    #
    _C_LABEL_LENGTH: int = 31

    def __init__(self, database: StateDatabase):
        """
        Create (if necessary) the symbol and prerequisite tables and the
        lookup index.

        :param database: Underlying state database to decorate.
        """
        super().__init__(database)
        create_symbol_table = [
            f'''create table if not exists c_symbol (
                    id integer primary key,
                    symbol character({self._C_LABEL_LENGTH}) not null,
                    found_in character({FileInfoDatabase.PATH_LENGTH})
                        references file_info (filename)
                )''',
            '''create index if not exists idx_c_symbol
                   on c_symbol (symbol, found_in)'''
        ]
        self.execute(create_symbol_table, {})

        # Although the current symbol will already have been entered into the
        # database it is not necessarily unique. We may have multiple source
        # files which define identically named symbols. Thus it can not be used
        # as a foreign key alone.
        #
        # Meanwhile the dependency symbol may not have been encountered yet so
        # we can't expect it to be in the database. Thus it too may not be
        # used as a foreign key.
        #
        create_prerequisite_table = [
            f'''create table if not exists c_prerequisite (
                    id integer primary key,
                    symbol character({self._C_LABEL_LENGTH}) not null,
                    found_in character({FileInfoDatabase.PATH_LENGTH}) not null,
                    prerequisite character({self._C_LABEL_LENGTH}) not null,
                    foreign key (symbol, found_in)
                        references c_symbol (symbol, found_in)
                )'''
        ]
        self.execute(create_prerequisite_table, {})

    def __iter__(self) -> Generator[CInfo, None, None]:
        """
        Yields all symbols and their containing file names.

        Rows arrive ordered by (symbol, found_in), so consecutive rows for
        the same symbol are folded into a single CInfo with all of its
        prerequisites.

        :return: Object per symbol.
        """
        query = '''select s.symbol as name, s.found_in, p.prerequisite as prereq
                   from c_symbol as s
                   left join c_prerequisite as p
                   on p.symbol = s.symbol and p.found_in = s.found_in
                   order by s.symbol, s.found_in, p.prerequisite'''
        rows = self.execute([query], {})
        info: Optional[CInfo] = None
        # Sentinel key: can never match a real row on the first iteration.
        key: CSymbolID = CSymbolID('', Path())
        for row in rows:
            if CSymbolID(row['name'], Path(row['found_in'])) == key:
                # Same symbol as the previous row: accumulate prerequisite.
                if info is not None:
                    info.add_prerequisite(row['prereq'])
            else:  # (row['name'], row['found_in']) != key
                # New symbol: flush the record built so far.
                if info is not None:
                    yield info
                key = CSymbolID(row['name'], Path(row['found_in']))
                info = CInfo(key)
                if row['prereq']:
                    info.add_prerequisite(row['prereq'])
        if info is not None:  # We have left-overs
            yield info

    def add_c_symbol(self, symbol: CSymbolID) -> None:
        """
        Creates a record of a new symbol and the file it is found in.

        Note that the filename is absolute meaning that if you rename or move
        the source directory nothing will match up.

        :param symbol: symbol identifier.
        """
        add_symbol = [
            '''insert into c_symbol (symbol, found_in)
                   values (:symbol, :filename)'''
        ]
        self.execute(add_symbol,
                     {'symbol': symbol.name,
                      'filename': str(symbol.found_in)})

    def add_c_dependency(self,
                         symbol: CSymbolID,
                         depends_on: str) -> None:
        """
        Records the dependency of one symbol on another.

        :param symbol: symbol identifier.
        :param depends_on: Name of the prerequisite symbol.
        """
        add_dependency = [
            '''insert into c_prerequisite(symbol, found_in, prerequisite)
                   values (:symbol, :found_in, :depends_on)'''
        ]
        self.execute(add_dependency, {'symbol': symbol.name,
                                      'found_in': str(symbol.found_in),
                                      'depends_on': depends_on})

    def remove_c_file(self, filename: Union[Path, str]) -> None:
        """
        Removes all records relating of a particular source file.

        :param filename: File to be removed.
        """
        # Prerequisites are removed first while the symbol rows still exist.
        remove_file = [
            '''delete from c_prerequisite
               where found_in = :filename''',
            '''delete from c_symbol where found_in=:filename'''
        ]
        self.execute(remove_file, {'filename': str(filename)})

    def get_symbol(self, name: str) -> List[CInfo]:
        """
        Gets the details of symbols given their name.

        It is possible that identically named symbols appear in multiple
        files, hence why a list is returned. It would be an error to try
        linking these into a single executable but that is not a concern for
        the model of the source tree.

        :param name: symbol name.
        :return: List of symbol information objects.
        """
        query = '''select s.symbol, s.found_in, p.prerequisite
                   from c_symbol as s
                   left join c_prerequisite as p
                   on p.symbol = s.symbol and p.found_in = s.found_in
                   where s.symbol=:symbol
                   order by s.symbol, s.found_in, p.prerequisite'''
        rows = self.execute(query, {'symbol': name})
        info_list: List[CInfo] = []
        previous_id = None
        info: Optional[CInfo] = None
        # Same run-length grouping scheme as __iter__, but collecting into
        # a list rather than yielding.
        for row in rows:
            symbol_id = CSymbolID(row['symbol'], Path(row['found_in']))
            if previous_id is not None and symbol_id == previous_id:
                if info is not None:
                    info.add_prerequisite(row['prerequisite'])
            else:  # symbol_id != previous_id
                if info is not None:
                    info_list.append(info)
                info = CInfo(symbol_id)
                if row['prerequisite'] is not None:
                    info.add_prerequisite((row['prerequisite']))
            previous_id = symbol_id
        if info is not None:  # We have left overs
            info_list.append(info)
        if len(info_list) == 0:
            message = 'symbol "{symbol}" not found in database.'
            raise WorkingStateException(message.format(symbol=name))
        return info_list

    def depends_on(self, symbol: CSymbolID)\
            -> Generator[CSymbolID, None, None]:
        """
        Gets the prerequisite symbols of a symbol.

        :param symbol: symbol identifier.
        :return: Prerequisite symbol names. May be an empty list.
        """
        query = '''select p.prerequisite, f.found_in
                   from c_prerequisite as p
                   left join c_symbol as f on f.symbol = p.prerequisite
                   where p.symbol=:symbol and p.found_in=:filename
                   order by p.symbol, f.found_in'''
        rows = self.execute(query, {'symbol': symbol.name,
                                    'filename': str(symbol.found_in)})
        for row in rows:
            # A null found_in means the prerequisite has not (yet) been seen
            # in any analysed file, so it stays unresolved.
            if row['found_in'] is None:
                yield CSymbolUnresolvedID(row['prerequisite'])
            else:  # row['found_in'] is not None
                yield CSymbolID(row['prerequisite'], Path(row['found_in']))
class CAnalyser(Task):
    """
    Analyses a preprocessed C file with libclang: records externally
    linked function definitions and their dependencies on symbols pulled
    in from user-supplied headers.
    """

    def __init__(self, workspace: Path):
        """
        :param workspace: Directory holding the state database.
        """
        self.database = SqliteStateDatabase(workspace)

    def _locate_include_regions(self, trans_unit) -> None:
        """
        Record the line numbers at which the Fab-injected include-marker
        pragmas open and close system/user include regions.
        """
        # Aim is to identify where included (top level) regions
        # start and end in the file
        self._include_region = []

        # Use a deque to implement a rolling window of 4 identifiers
        # (enough to be sure we can spot an entire pragma)
        identifiers: deque = deque([])
        for token in trans_unit.cursor.get_tokens():
            identifiers.append(token)
            if len(identifiers) < 4:
                continue
            if len(identifiers) > 4:
                identifiers.popleft()
            # Trigger off of the FAB identifier only to save
            # on joining the group too frequently
            if identifiers[2].spelling == "FAB":
                lineno = identifiers[2].location.line
                # (renamed from "id" to avoid shadowing the builtin)
                full = " ".join(ident.spelling for ident in identifiers)
                if full == "# pragma FAB SysIncludeStart":
                    self._include_region.append(
                        (lineno, "sys_include_start"))
                elif full == "# pragma FAB SysIncludeEnd":
                    self._include_region.append(
                        (lineno, "sys_include_end"))
                elif full == "# pragma FAB UsrIncludeStart":
                    self._include_region.append(
                        (lineno, "usr_include_start"))
                elif full == "# pragma FAB UsrIncludeEnd":
                    self._include_region.append(
                        (lineno, "usr_include_end"))

    def _check_for_include(self, lineno) -> Optional[str]:
        """
        Return the innermost include-region kind ("sys_include" or
        "usr_include") containing *lineno*, or None if outside all regions.
        """
        # Check whether a given line number is in a region that
        # has come from an include (and return what kind of include)
        include_stack = []
        for region_line, region_type in self._include_region:
            if region_line > lineno:
                break
            if region_type.endswith("start"):
                include_stack.append(region_type.replace("_start", ""))
            elif region_type.endswith("end"):
                include_stack.pop()
        if include_stack:
            return include_stack[-1]
        else:
            return None

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """
        Analyse exactly one C source artifact; returns it in the
        Analysed state with definitions and dependencies attached.
        """
        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('C Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        reader = FileTextReader(artifact.location)
        # Clear any stale records for this file before re-analysing.
        # (Previously this pair of statements was duplicated verbatim;
        # once is sufficient.)
        state = CWorkingState(self.database)
        state.remove_c_file(reader.filename)

        new_artifact = Artifact(artifact.location,
                                artifact.filetype,
                                Analysed)

        index = clang.cindex.Index.create()
        translation_unit = index.parse(reader.filename,
                                       args=["-xc"])

        # Create include region line mappings
        self._locate_include_regions(translation_unit)

        # Now walk the actual nodes and find all relevant external symbols
        usr_includes = []
        current_def = None
        for node in translation_unit.cursor.walk_preorder():
            if node.kind == clang.cindex.CursorKind.FUNCTION_DECL:
                if (node.is_definition()
                        and node.linkage == clang.cindex.LinkageKind.EXTERNAL):
                    # This should catch function definitions which are exposed
                    # to the rest of the application
                    current_def = CSymbolID(node.spelling, artifact.location)
                    state.add_c_symbol(current_def)
                    new_artifact.add_definition(node.spelling)
                else:
                    # Any other declarations should be coming in via headers,
                    # we can use the injected pragmas to work out whether these
                    # are coming from system headers or user headers
                    if (self._check_for_include(node.location.line)
                            == "usr_include"):
                        usr_includes.append(node.spelling)
            elif (node.kind == clang.cindex.CursorKind.CALL_EXPR):
                # When encountering a function call we should be able to
                # cross-reference it with a definition seen earlier; and
                # if it came from a user supplied header then we will
                # consider it a dependency within the project
                if node.spelling in usr_includes and current_def is not None:
                    # TODO: Assumption that the most recent exposed
                    # definition encountered above is the one which
                    # should lodge this dependency - is that true?
                    state.add_c_dependency(current_def, node.spelling)
                    new_artifact.add_dependency(node.spelling)

        return [new_artifact]
class _CTextReaderPragmas(TextReaderDecorator):
    """
    Wraps a C source reader, surrounding every #include line with a pair
    of Fab-specific #pragma markers (system or user, depending on the
    include style) so the analyser can classify declarations after
    preprocessing.
    """

    _include_re: str = r'^\s*#include\s+(\S+)'
    _include_pattern: Pattern = re.compile(_include_re)

    def __init__(self, source: TextReader):
        super().__init__(source)
        self._line_buffer = ''

    def line_by_line(self) -> Iterator[str]:
        """Yield the source lines with include markers injected."""
        for line in self._source.line_by_line():
            match: Optional[Match] = self._include_pattern.match(line)
            if match is None:
                yield line
                continue
            # For valid C the first character of the matched
            # part of the group will indicate whether this is
            # a system library include or a user include
            target: str = match.group(1)
            # TODO: Is this sufficient? Or do the pragmas
            # need to include identifying info
            # e.g. the name of the original include?
            if target.startswith('<'):
                yield '#pragma FAB SysIncludeStart\n'
                yield line
                yield '#pragma FAB SysIncludeEnd\n'
            elif target.startswith(('"', "'")):
                yield '#pragma FAB UsrIncludeStart\n'
                yield line
                yield '#pragma FAB UsrIncludeEnd\n'
            else:
                raise TaskException('Found badly formatted #include')
class CPragmaInjector(Task):
    """
    Copies a C file into the workspace with Fab include-marker pragmas
    injected around every #include directive.
    """

    def __init__(self, workspace: Path):
        """
        :param workspace: Directory receiving the annotated copy.
        """
        self._workspace = workspace

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """Inject pragmas into one artifact; returns it as Modified."""
        if len(artifacts) != 1:
            msg = ('C Pragma Injector expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)
        artifact = artifacts[0]

        injector = _CTextReaderPragmas(
            FileTextReader(artifact.location))

        # Read everything first, then write, so the input is fully
        # consumed before the output file is opened.
        annotated = list(injector.line_by_line())
        output_file = self._workspace / artifact.location.name
        with output_file.open('w') as out_file:
            out_file.writelines(annotated)

        new_artifact = Artifact(output_file,
                                artifact.filetype,
                                Modified)
        for dependency in artifact.depends_on:
            new_artifact.add_dependency(dependency)
        return [new_artifact]
class CPreProcessor(Task):
    """
    Runs an external C preprocessor over a source file, leaving the
    result in the workspace under the original file name.
    """

    def __init__(self,
                 preprocessor: str,
                 flags: List[str],
                 workspace: Path):
        """
        :param preprocessor: Name of the preprocessor executable.
        :param flags: Flags inserted before the input file name.
        :param workspace: Directory receiving the preprocessed output.
        """
        self._preprocessor = preprocessor
        self._flags = flags
        self._workspace = workspace

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """Preprocess exactly one artifact; returns it in the Raw state."""
        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('C Preprocessor expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)

        command = [self._preprocessor]
        command.extend(self._flags)
        command.append(str(artifact.location))

        # Use temporary output name (in case the given tool
        # can't operate in-place)
        output_file = (self._workspace /
                       artifact.location.with_suffix('.fabcpp').name)
        command.append(str(output_file))
        subprocess.run(command, check=True)

        # Move the temporary over the real output file.  Both paths live
        # in the workspace (same filesystem), so an os-level rename is
        # safe and avoids shelling out to the Unix-only "mv" tool as a
        # child process.  Path.replace overwrites an existing destination,
        # matching mv's behaviour.
        final_output = (self._workspace /
                        artifact.location.name)
        output_file.replace(final_output)

        return [Artifact(final_output,
                         artifact.filetype,
                         Raw)]
class CCompiler(Task):
    """
    Compiles a single C source file into an object file in the workspace.
    """

    def __init__(self,
                 compiler: str,
                 flags: List[str],
                 workspace: Path):
        """
        :param compiler: Name of the compiler executable.
        :param flags: Flags inserted before the input file name.
        :param workspace: Directory receiving the object file.
        """
        self._compiler = compiler
        self._flags = flags
        self._workspace = workspace

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """Compile exactly one artifact; returns one Compiled object."""
        if len(artifacts) != 1:
            msg = ('C Compiler expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)
        source = artifacts[0]

        object_path = (self._workspace /
                       source.location.with_suffix('.o').name)
        # Command shape: <compiler> <flags...> <source> -o <object>
        command = ([self._compiler]
                   + self._flags
                   + [str(source.location), '-o', str(object_path)])
        subprocess.run(command, check=True)

        result = Artifact(object_path,
                          BinaryObject,
                          Compiled)
        # Carry the exposed symbol definitions over to the object file.
        for definition in source.defines:
            result.add_definition(definition)
        return [result]
|
uk-gov-mirror/metomi.fab | source/fab/tasks/fortran.py | <reponame>uk-gov-mirror/metomi.fab
# (c) Crown copyright Met Office. All rights reserved.
# For further details please refer to the file COPYRIGHT
# which you should have received as part of this distribution
"""
Fortran language handling classes.
"""
import logging
from pathlib import Path
import re
import subprocess
from typing import (Generator,
Iterator,
List,
Match,
Optional,
Pattern,
Sequence,
Tuple,
Union)
from fab.database import (DatabaseDecorator,
FileInfoDatabase,
StateDatabase,
SqliteStateDatabase,
WorkingStateException)
from fab.tasks import \
Task, \
TaskException
from fab.reader import TextReader, TextReaderDecorator, FileTextReader
from fab.artifact import \
Artifact, \
Analysed, \
Raw, \
Compiled, \
BinaryObject
class FortranUnitUnresolvedID(object):
    """A Fortran program unit known only by name, not yet located."""

    def __init__(self, name: str):
        self.name = name

    def __eq__(self, other):
        # Comparison with unrelated types is a programming error here,
        # so raise rather than returning NotImplemented.
        if not isinstance(other, FortranUnitUnresolvedID):
            raise TypeError(
                "Cannot compare FortranUnitUnresolvedID with "
                + other.__class__.__name__)
        return self.name == other.name
class FortranUnitID(FortranUnitUnresolvedID):
    """A Fortran program unit identified by name plus containing file."""

    def __init__(self, name: str, found_in: Path):
        super().__init__(name)
        self.found_in = found_in

    def __hash__(self):
        # Same combination as ever: the sum of the component hashes.
        return hash(self.name) + hash(self.found_in)

    def __eq__(self, other):
        if not isinstance(other, FortranUnitID):
            raise TypeError(
                "Cannot compare FortranUnitID with "
                + other.__class__.__name__)
        return super().__eq__(other) and self.found_in == other.found_in
class FortranInfo(object):
    """Dependency record for one Fortran program unit."""

    def __init__(self,
                 unit: FortranUnitID,
                 depends_on: Sequence[str] = ()):
        self.unit = unit
        # Copy into a list so callers' sequences are never mutated.
        self.depends_on = list(depends_on)

    def __str__(self):
        return f"Fortran program unit '{self.unit.name}' " \
            f"from '{self.unit.found_in}' depending on: " \
            f"{', '.join(self.depends_on)}"

    def __eq__(self, other):
        if not isinstance(other, FortranInfo):
            raise TypeError(
                "Cannot compare Fortran Info with "
                + other.__class__.__name__)
        return self.unit == other.unit and self.depends_on == other.depends_on

    def add_prerequisite(self, prereq: str):
        """Append a prerequisite unit name, preserving order."""
        self.depends_on.append(prereq)
class FortranWorkingState(DatabaseDecorator):
    """
    Maintains a database of information relating to Fortran program units.

    Two tables are managed: "fortran_unit" maps each program unit name to
    the source file it was found in, while "fortran_prerequisite" records,
    per unit and file, the names of the units it depends on.
    """
    # According to the Fortran spec, section 3.2.2 in
    # BS ISO/IEC 1539-1:2010, the maximum size of a name is 63 characters.
    #
    # If you find source containing labels longer than this then that source
    # is non-conformant.
    #
    _FORTRAN_LABEL_LENGTH: int = 63
    def __init__(self, database: StateDatabase):
        """
        :param database: Underlying state database to decorate; the
            required tables are created here if they do not yet exist.
        """
        super().__init__(database)
        # A unit name alone is not unique - identically named units may
        # exist in several files - so (unit, found_in) is indexed rather
        # than made a key.
        create_unit_table = [
            f'''create table if not exists fortran_unit (
                   id integer primary key,
                   unit character({self._FORTRAN_LABEL_LENGTH}) not null,
                   found_in character({FileInfoDatabase.PATH_LENGTH})
                       references file_info (filename)
                   )''',
            '''create index if not exists idx_fortran_program_unit
                   on fortran_unit (unit, found_in)'''
            ]
        self.execute(create_unit_table, {})
        # Although the current unit will already have been entered into the
        # database it is not necessarily unique. We may have multiple source
        # files which define identically named units. Thus it can not be used
        # as a foreign key alone.
        #
        # Meanwhile the dependency unit may not have been encountered yet so
        # we can't expect it to be in the database. Thus it too may not be
        # used as a foreign key.
        #
        create_prerequisite_table = [
            f'''create table if not exists fortran_prerequisite (
                   id integer primary key,
                   unit character({self._FORTRAN_LABEL_LENGTH}) not null,
                   found_in character({FileInfoDatabase.PATH_LENGTH}) not null,
                   prerequisite character({self._FORTRAN_LABEL_LENGTH}) not null,
                   foreign key (unit, found_in)
                       references fortran_unit (unit, found_in)
                   )'''
            ]
        self.execute(create_prerequisite_table, {})
    def __iter__(self) -> Generator[FortranInfo, None, None]:
        """
        Yields all units and their containing file names.

        Rows arrive ordered by unit, file and prerequisite, so consecutive
        rows belonging to the same (unit, file) pair are folded into a
        single FortranInfo object.

        :return: Object per unit.
        """
        query = '''select u.unit as name, u.found_in, p.prerequisite as prereq
                   from fortran_unit as u
                   left join fortran_prerequisite as p
                   on p.unit = u.unit and p.found_in = u.found_in
                   order by u.unit, u.found_in, p.prerequisite'''
        rows = self.execute([query], {})
        info: Optional[FortranInfo] = None
        # Identity of the previous row; starts as a value which should not
        # match any stored unit.
        key: FortranUnitID = FortranUnitID('', Path())
        for row in rows:
            if FortranUnitID(row['name'], Path(row['found_in'])) == key:
                # Same unit as the previous row: just another prerequisite.
                if info is not None:
                    info.add_prerequisite(row['prereq'])
            else: # (row['name'], row['found_in']) != key
                # New unit: emit the finished one and start collecting.
                if info is not None:
                    yield info
                key = FortranUnitID(row['name'], Path(row['found_in']))
                info = FortranInfo(key)
                # A null prerequisite means the left join found none.
                if row['prereq']:
                    info.add_prerequisite(row['prereq'])
        if info is not None: # We have left-overs
            yield info
    def add_fortran_program_unit(self, unit: FortranUnitID) -> None:
        """
        Creates a record of a new program unit and the file it is found in.

        Note that the filename is absolute meaning that if you rename or move
        the source directory nothing will match up.

        :param unit: Program unit identifier.
        """
        add_unit = [
            '''insert into fortran_unit (unit, found_in)
                   values (:unit, :filename)'''
            ]
        self.execute(add_unit,
                     {'unit': unit.name, 'filename': str(unit.found_in)})
    def add_fortran_dependency(self,
                               unit: FortranUnitID,
                               depends_on: str) -> None:
        """
        Records the dependency of one unit on another.

        :param unit: Program unit identifier.
        :param depends_on: Name of the prerequisite unit.
        """
        add_dependency = [
            '''insert into fortran_prerequisite(unit, found_in, prerequisite)
                   values (:unit, :found_in, :depends_on)'''
            ]
        self.execute(add_dependency, {'unit': unit.name,
                                      'found_in': str(unit.found_in),
                                      'depends_on': depends_on})
    def remove_fortran_file(self, filename: Union[Path, str]) -> None:
        """
        Removes all records relating of a particular source file.

        :param filename: File to be removed.
        """
        # Prerequisites are deleted first as they reference the unit table.
        remove_file = [
            '''delete from fortran_prerequisite
                   where found_in = :filename''',
            '''delete from fortran_unit where found_in=:filename'''
            ]
        self.execute(remove_file, {'filename': str(filename)})
    def get_program_unit(self, name: str) -> List[FortranInfo]:
        """
        Gets the details of program units given their name.

        It is possible that identically named program units appear in multiple
        files, hence why a list is returned. It would be an error to try
        linking these into a single executable but that is not a concern for
        the model of the source tree.

        :param name: Program unit name.
        :return: List of unit information objects.
        :raises WorkingStateException: If no unit of that name is recorded.
        """
        query = '''select u.unit, u.found_in, p.prerequisite
                   from fortran_unit as u
                   left join fortran_prerequisite as p
                   on p.unit = u.unit and p.found_in = u.found_in
                   where u.unit=:unit
                   order by u.unit, u.found_in, p.prerequisite'''
        # NOTE(review): other methods pass a list of statements to
        # execute(); this passes a bare string - confirm execute() accepts
        # both forms.
        rows = self.execute(query, {'unit': name})
        info_list: List[FortranInfo] = []
        previous_id = None
        info: Optional[FortranInfo] = None
        # As in __iter__, ordered rows for the same (unit, file) pair are
        # folded into a single FortranInfo.
        for row in rows:
            unit_id = FortranUnitID(row['unit'], Path(row['found_in']))
            if previous_id is not None and unit_id == previous_id:
                if info is not None:
                    info.add_prerequisite(row['prerequisite'])
            else: # unit_id != previous_id
                if info is not None:
                    info_list.append(info)
                info = FortranInfo(unit_id)
                if row['prerequisite'] is not None:
                    info.add_prerequisite((row['prerequisite']))
            previous_id = unit_id
        if info is not None: # We have left overs
            info_list.append(info)
        if len(info_list) == 0:
            message = 'Program unit "{unit}" not found in database.'
            raise WorkingStateException(message.format(unit=name))
        return info_list
    def depends_on(self, unit: FortranUnitID)\
            -> Generator[FortranUnitID, None, None]:
        """
        Gets the prerequisite program units of a program unit.

        :param unit: Program unit identifier.
        :return: Prerequisite unit names. May be an empty list.
        """
        query = '''select p.prerequisite, u.found_in
                   from fortran_prerequisite as p
                   left join fortran_unit as u on u.unit = p.prerequisite
                   where p.unit=:unit and p.found_in=:filename
                   order by p.unit, u.found_in'''
        rows = self.execute(query, {'unit': unit.name,
                                    'filename': str(unit.found_in)})
        for row in rows:
            # A prerequisite not (yet) present in the unit table has no
            # file, so only an unresolved identifier can be returned.
            if row['found_in'] is None:
                yield FortranUnitUnresolvedID(row['prerequisite'])
            else: # row['found_in'] is not None
                yield FortranUnitID(row['prerequisite'], Path(row['found_in']))
class FortranNormaliser(TextReaderDecorator):
    """
    Presents a Fortran source file as a stream of simplified lines:
    comments are stripped, continued statements are collapsed onto one
    logical line and whitespace runs are reduced to single spaces.
    """
    def __init__(self, source: TextReader):
        super().__init__(source)
        # Accumulates the pieces of a continued statement until the final
        # physical line arrives.
        self._line_buffer = ''

    def line_by_line(self) -> Iterator[str]:
        """
        Each line of the source file is modified to ease the work of analysis.

        The lines are sanitised to remove comments and collapse the result
        of continuation lines whilst also trimming away as much whitespace as
        possible
        """
        for line in self._source.line_by_line():
            # Remove comments - we accept that an exclamation mark
            # appearing in a string will cause the rest of that line
            # to be blanked out, but the things we wish to parse
            # later shouldn't appear after a string on a line anyway
            line = re.sub(r'!.*', '', line)
            # If the line is empty, go onto the next
            if line.strip() == '':
                continue
            # Deal with continuations by removing them to collapse
            # the lines together. Only a trailing ampersand marks a
            # continuation; previously any '&' in the buffer (e.g. inside
            # a character literal) caused every subsequent line to be
            # swallowed into the buffer and never yielded.
            self._line_buffer += line
            if re.search(r'&\s*$', self._line_buffer):
                self._line_buffer = re.sub(r'&\s*$', '', self._line_buffer)
                continue
            # Before output, minimise whitespace and trim the line ends.
            line_buffer = re.sub(r'\s+', r' ', self._line_buffer)
            yield line_buffer.rstrip()
            self._line_buffer = ''
class FortranAnalyser(Task):
    """
    Scans a Fortran source file for program unit definitions and "use"
    statements, recording both in the working-state database and on the
    Artifact it emits.
    """
    def __init__(self, workspace: Path):
        """
        :param workspace: Directory holding the state database.
        """
        self.database = SqliteStateDatabase(workspace)
    # Modules provided by the compiler itself; usage of these is not
    # recorded as a dependency.
    _intrinsic_modules = ['iso_fortran_env']
    # Building blocks for the patterns below. A Fortran name starts with
    # a letter and continues with letters, digits and underscores.
    _letters: str = r'abcdefghijklmnopqrstuvwxyz'
    _digits: str = r'1234567890'
    _underscore: str = r'_'
    _alphanumeric_re: str = '[' + _letters + _digits + _underscore + ']'
    _name_re: str = '[' + _letters + ']' + _alphanumeric_re + '*'
    _procedure_block_re: str = r'function|subroutine'
    _unit_block_re: str = r'program|module|' + _procedure_block_re
    _scope_block_re: str = r'associate|block|critical|do|if|select'
    _iface_block_re: str = r'interface'
    _type_block_re: str = r'type'
    # Group 1: unit kind, group 2: unit name.
    _program_unit_re: str = r'^\s*({unit_type_re})\s*({name_re})' \
        .format(unit_type_re=_unit_block_re,
                name_re=_name_re)
    # Group 2: optional construct label, group 3: construct kind.
    _scoping_re: str = r'^\s*(({name_re})\s*:)?\s*({scope_type_re})' \
        .format(scope_type_re=_scope_block_re,
                name_re=_name_re)
    # Group 1: "function" or "subroutine", group 2: procedure name.
    _procedure_re: str = r'^\s*({procedure_block_re})\s*({name_re})' \
        .format(procedure_block_re=_procedure_block_re,
                name_re=_name_re)
    # Group 1: optional interface name.
    _interface_re: str = r'^\s*{iface_block_re}\s*({name_re})?' \
        .format(iface_block_re=_iface_block_re,
                name_re=_name_re)
    # Group 3: type name (after optional attribute list and '::').
    _type_re: str = r'^\s*{type_block_re}' \
               r'((\s*,\s*[^,]+)*\s*::)?' \
               r'\s*({name_re})'.format(type_block_re=_type_block_re,
                                        name_re=_name_re)
    # Group 1: optional block kind after "end", group 2: optional name.
    _end_block_re: str \
        = r'^\s*end' \
          r'\s*({scope_block_re}|{iface_block_re}' \
          r'|{type_block_re}|{unit_type_re})?' \
          r'\s*({name_re})?'.format(scope_block_re=_scope_block_re,
                                    iface_block_re=_iface_block_re,
                                    type_block_re=_type_block_re,
                                    unit_type_re=_unit_block_re,
                                    name_re=_name_re)
    # Group 3: name of the used module.
    _use_statement_re: str \
        = r'^\s*use((\s*,\s*non_intrinsic)?\s*::)?\s*({name_re})' \
        .format(name_re=_name_re)
    # Patterns are compiled once at class definition time; Fortran is
    # case-insensitive so all matching ignores case.
    _program_unit_pattern: Pattern = re.compile(_program_unit_re,
                                                re.IGNORECASE)
    _scoping_pattern: Pattern = re.compile(_scoping_re, re.IGNORECASE)
    _procedure_pattern: Pattern = re.compile(_procedure_re, re.IGNORECASE)
    _interface_pattern: Pattern = re.compile(_interface_re, re.IGNORECASE)
    _type_pattern: Pattern = re.compile(_type_re, re.IGNORECASE)
    _end_block_pattern: Pattern = re.compile(_end_block_re, re.IGNORECASE)
    _use_pattern: Pattern = re.compile(_use_statement_re, re.IGNORECASE)
    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """
        Analyses a single Fortran file Artifact.

        Previous records for the file are removed, then the normalised
        source is scanned line by line while a stack of open scopes is
        maintained so that "use" statements can be attributed to the
        enclosing program unit.

        :param artifacts: Exactly one Artifact holding the file location.
        :return: One Artifact marked Analysed, carrying the unit names it
            defines and depends upon.
        :raises TaskException: If more than one Artifact is supplied, a
            "use" appears outside any program unit or an "end" does not
            match the innermost open scope.
        """
        logger = logging.getLogger(__name__)
        if len(artifacts) == 1:
            artifact = artifacts[0]
        else:
            msg = ('Fortran Analyser expects only one Artifact, '
                   f'but was given {len(artifacts)}')
            raise TaskException(msg)
        reader = FileTextReader(artifact.location)
        new_artifact = Artifact(artifact.location,
                                artifact.filetype,
                                Analysed)
        state = FortranWorkingState(self.database)
        # Re-analysis replaces, rather than appends to, earlier records.
        state.remove_fortran_file(reader.filename)
        normalised_source = FortranNormaliser(reader)
        # Stack of (block nature, block name) for currently open constructs.
        scope: List[Tuple[str, str]] = []
        for line in normalised_source.line_by_line():
            logger.debug(scope)
            logger.debug('Considering: %s', line)
            # A program unit can only begin at file level, i.e. with no
            # scope currently open.
            if len(scope) == 0:
                unit_match: Optional[Match] \
                    = self._program_unit_pattern.match(line)
                if unit_match:
                    unit_type: str = unit_match.group(1).lower()
                    unit_name: str = unit_match.group(2).lower()
                    logger.debug('Found %s called "%s"', unit_type, unit_name)
                    unit_id = FortranUnitID(unit_name, reader.filename)
                    state.add_fortran_program_unit(unit_id)
                    new_artifact.add_definition(unit_name)
                    scope.append((unit_type, unit_name))
                    continue
            use_match: Optional[Match] \
                = self._use_pattern.match(line)
            if use_match:
                use_name: str = use_match.group(3).lower()
                if use_name in self._intrinsic_modules:
                    logger.debug('Ignoring intrinsic module "%s"', use_name)
                else:
                    if len(scope) == 0:
                        use_message \
                            = '"use" statement found outside program unit'
                        raise TaskException(use_message)
                    logger.debug('Found usage of "%s"', use_name)
                    # The dependency is attributed to the outermost unit,
                    # i.e. the bottom of the scope stack.
                    unit_id = FortranUnitID(scope[0][1], reader.filename)
                    state.add_fortran_dependency(unit_id, use_name)
                    new_artifact.add_dependency(use_name)
                continue
            block_match: Optional[Match] = self._scoping_pattern.match(line)
            if block_match:
                # Beware we want the value of a different group to the one we
                # check the presence of.
                #
                block_name: str = block_match.group(1) \
                    and block_match.group(2).lower()
                block_nature: str = block_match.group(3).lower()
                logger.debug('Found %s called "%s"', block_nature, block_name)
                scope.append((block_nature, block_name))
                continue
            proc_match: Optional[Match] \
                = self._procedure_pattern.match(line)
            if proc_match:
                proc_nature = proc_match.group(1).lower()
                proc_name = proc_match.group(2).lower()
                logger.debug('Found %s called "%s"', proc_nature, proc_name)
                # Note: We append a tuple so double brackets.
                scope.append((proc_nature, proc_name))
                continue
            iface_match: Optional[Match] = self._interface_pattern.match(line)
            if iface_match:
                # Anonymous interface blocks leave the name as None.
                iface_name = iface_match.group(1) \
                    and iface_match.group(1).lower()
                logger.debug('Found interface called "%s"', iface_name)
                scope.append(('interface', iface_name))
                continue
            type_match: Optional[Match] = self._type_pattern.match(line)
            if type_match:
                type_name = type_match.group(3).lower()
                logger.debug('Found type called "%s"', type_name)
                scope.append(('type', type_name))
                continue
            end_match: Optional[Match] = self._end_block_pattern.match(line)
            if end_match:
                end_nature: str = end_match.group(1) \
                    and end_match.group(1).lower()
                end_name: str = end_match.group(2) \
                    and end_match.group(2).lower()
                logger.debug('Found end of %s called %s',
                             end_nature, end_name)
                # NOTE(review): a stray "end" at file level would pop an
                # empty stack and raise IndexError - confirm inputs are
                # assumed well-formed.
                exp: Tuple[str, str] = scope.pop()
                # Where "end" names a kind or a label, it must agree with
                # the innermost open construct.
                if end_nature is not None:
                    if end_nature != exp[0]:
                        end_message = 'Expected end of {exp} "{name}" ' \
                                      'but found {found}'
                        end_values = {'exp': exp[0],
                                      'name': exp[1],
                                      'found': end_nature}
                        raise TaskException(
                            end_message.format(**end_values))
                if end_name is not None:
                    if end_name != exp[1]:
                        end_message = 'Expected end of {exp} "{name}" ' \
                                      'but found end of {found}'
                        end_values = {'exp': exp[0],
                                      'name': exp[1],
                                      'found': end_name}
                        raise TaskException(
                            end_message.format(**end_values))
        return [new_artifact]
class FortranPreProcessor(Task):
    """
    Invokes an external preprocessor on a Fortran source file, writing
    the result into the workspace as a '.f90' file.
    """
    def __init__(self,
                 preprocessor: str,
                 flags: List[str],
                 workspace: Path):
        self._preprocessor = preprocessor
        self._flags = flags
        self._workspace = workspace

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """
        Preprocesses the single supplied artifact and returns the raw
        Fortran artifact produced.

        :raises TaskException: If not exactly one artifact is supplied.
        """
        if len(artifacts) != 1:
            raise TaskException('Fortran Preprocessor expects only one '
                                f'Artifact, but was given {len(artifacts)}')
        artifact = artifacts[0]
        output_file = (self._workspace
                       / artifact.location.with_suffix('.f90').name)
        # Command shape: <preprocessor> <flags...> <input> <output>
        command = [self._preprocessor,
                   *self._flags,
                   str(artifact.location),
                   str(output_file)]
        subprocess.run(command, check=True)
        return [Artifact(output_file, artifact.filetype, Raw)]
class FortranCompiler(Task):
    """
    Invokes a Fortran compiler on a single source artifact, producing a
    binary object file in the workspace.
    """
    def __init__(self,
                 compiler: str,
                 flags: List[str],
                 workspace: Path):
        self._compiler = compiler
        self._flags = flags
        self._workspace = workspace

    def run(self, artifacts: List[Artifact]) -> List[Artifact]:
        """
        Compiles the single supplied artifact and returns the resulting
        object-file artifact.

        :raises TaskException: If not exactly one artifact is supplied.
        """
        if len(artifacts) != 1:
            raise TaskException('Fortran Compiler expects only one '
                                f'Artifact, but was given {len(artifacts)}')
        artifact = artifacts[0]
        object_file = (self._workspace
                       / artifact.location.with_suffix('.o').name)
        # Command shape: <compiler> <flags...> <input> -o <output>
        command = [self._compiler,
                   *self._flags,
                   str(artifact.location),
                   '-o', str(object_file)]
        subprocess.run(command, check=True)
        object_artifact = Artifact(object_file, BinaryObject, Compiled)
        # The object carries forward the unit names defined by the source.
        for definition in artifact.defines:
            object_artifact.add_definition(definition)
        return [object_artifact]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.