Schema (column, type, value range):

column     type           range
repo_name  stringlengths  5 - 100
path       stringlengths  4 - 231
language   stringclasses  1 value
license    stringclasses  15 values
size       int64          6 - 947k
score      float64        0 - 0.34
prefix     stringlengths  0 - 8.16k
middle     stringlengths  3 - 512
suffix     stringlengths  0 - 8.17k
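The rows below pair repository metadata with three text fields (prefix, middle, suffix) that appear to be contiguous slices of each source file, i.e. a fill-in-the-middle style split. As a rough illustration of how a record with this schema fits together, here is a minimal sketch in Python; the record values, the helper names reassemble and keep, and the filtering thresholds are hypothetical — only the field names come from the schema above.

# Hypothetical record following the schema above (values invented for
# illustration; only the key names are taken from the schema).
record = {
    "repo_name": "example/project",
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 123,
    "score": 0.0021,
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n\nprint(add(1, 2))\n",
}

def reassemble(rec):
    # prefix + middle + suffix recovers the contiguous file excerpt
    return rec["prefix"] + rec["middle"] + rec["suffix"]

def keep(rec, max_score=0.34, max_middle_chars=512):
    # bounds mirror the score and middle-length ranges listed in the schema
    return rec["score"] <= max_score and len(rec["middle"]) <= max_middle_chars

if keep(record):
    print(reassemble(record))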
repo_name: mseclab/PyJFuzz | path: test/test_pjf_configuration.py | language: Python | license: mit | size: 2,126 | score: 0.001881
""" The MIT License (MIT) Copyright (c) 2016 Daniele Linguaglossa <d.linguaglossa@mseclab.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation f
iles (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice
and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from pyjfuzz.core.pjf_configuration import PJFConfiguration import unittest import argparse import sys __TITLE__ = "Testing PJFConfiguration object" class TestPJFConfiguration(unittest.TestCase): def test_json_configuration(self): sys.argv.append("--J") sys.argv.append("[1]") sys.argv.append("--no-logo") parser = argparse.ArgumentParser(description='', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--J', type=str, default=None) parser.add_argument('--no-logo', action='store_true', dest='nologo', default=False, required=False) parsed = parser.parse_args() args = PJFConfiguration(parsed) for arg in parsed.__dict__: self.assertTrue(arg in args.__dict__) def test(): print("=" * len(__TITLE__)) print(__TITLE__) print("=" * len(__TITLE__)) suite = unittest.TestLoader().loadTestsFromTestCase(TestPJFConfiguration) unittest.TextTestRunner(verbosity=2).run(suite)
repo_name: achadwick/mypaint | path: lib/tiledsurface.py | language: Python | license: gpl-2.0 | size: 42,966 | score: 0.000535
# This file is part of MyPaint. # Copyright (C) 2007-2008 by Martin Renold <martinxyz@gmx.ch> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. """This module implements an unbounded tiled surface for painting.""" ## Imports from __future__ import division, print_function import time import sys import os import contextlib import logging from gettext import gettext as _ import numpy as np import mypaintlib import helpers import pixbufsurface import lib.surface from lib.surface import TileAccessible, TileBlittable, TileCompositable from errors import FileHandlingError import lib.fileutils import lib.modes logger = logging.getLogger(__name__) ## Constants TILE_SIZE = N = mypaintlib.TILE_SIZE MAX_MIPMAP_LEVEL = mypaintlib.MAX_MIPMAP_LEVEL ## Tile class and marker tile constants class _Tile (object): """Internal tile storage, with readonly flag Note: pixels are stored with premultiplied alpha. 15 bits are used, but fully opaque or white is stored as 2**15 (requiring 16 bits). This is to allow many calcuations to divide by 2**15 instead of (2**16-1). """ def __init__(self, copy_from=None): super(_Tile, self).__init__() if copy_from is None: self.rgba = np.zeros((N, N, 4), 'uint16') else: self.rgba = copy_from.rgba.copy() self.readonly = False def copy(self): return _Tile(copy_from=self) # tile for read-only operations on empty spots transparent_tile = _Tile() transparent_tile.readonly = True # tile with invalid pixel memory (needs refresh) mipmap_dirty_tile = _Tile() del mipmap_dirty_tile.rgba ## Class defs: surfaces class _SurfaceSnapshot (object): pass # TODO: # - move the tile storage from
My
PaintSurface to a separate class class MyPaintSurface (TileAccessible, TileBlittable, TileCompositable): """Tile-based surface The C++ part of this class is in tiledsurface.hpp """ def __init__(self, mipmap_level=0, mipmap_surfaces=None, looped=False, looped_size=(0, 0)): super(MyPaintSurface, self).__init__() # TODO: pass just what it needs access to, not all of self self._backend = mypaintlib.TiledSurface(self) self.tiledict = {} self.observers = [] # Used to implement repeating surfaces, like Background if looped_size[0] % N or looped_size[1] % N: raise ValueError('Looped size must be multiples of tile size') self.looped = looped self.looped_size = looped_size self.mipmap_level = mipmap_level if mipmap_level == 0: assert mipmap_surfaces is None self._mipmaps = self._create_mipmap_surfaces() else: assert mipmap_surfaces is not None self._mipmaps = mipmap_surfaces # Forwarding API self.set_symmetry_state = self._backend.set_symmetry_state self.begin_atomic = self._backend.begin_atomic self.get_color = self._backend.get_color self.get_alpha = self._backend.get_alpha self.draw_dab = self._backend.draw_dab def _create_mipmap_surfaces(self): """Internal: initializes an internal mipmap lookup table Overridable to avoid unnecessary work when initializing the background surface subclass. """ assert self.mipmap_level == 0 mipmaps = [self] for level in range(1, MAX_MIPMAP_LEVEL+1): s = MyPaintSurface(mipmap_level=level, mipmap_surfaces=mipmaps) mipmaps.append(s) # for quick lookup for level, s in enumerate(mipmaps): try: s.parent = mipmaps[level-1] except IndexError: s.parent = None try: s.mipmap = mipmaps[level+1] except IndexError: s.mipmap = None return mipmaps def end_atomic(self): bbox = self._backend.end_atomic() if (bbox[2] > 0 and bbox[3] > 0): self.notify_observers(*bbox) @property def backend(self): return self._backend def notify_observers(self, *args): for f in self.observers: f(*args) def clear(self): tiles = self.tiledict.keys() self.tiledict = {} self.notify_observers(*lib.surface.get_tiles_bbox(tiles)) if self.mipmap: self.mipmap.clear() def trim(self, rect): """Trim the layer to a rectangle, discarding data outside it :param rect: A trimming rectangle in model coordinates :type rect: tuple (x, y, w, h) Only complete tiles are discarded by this method. If a tile is neither fully inside nor fully outside the rectangle, the part of the tile outside the rectangle will be cleared. """ x, y, w, h = rect logger.info("Trim %dx%d%+d%+d", w, h, x, y) trimmed = [] for tx, ty in list(self.tiledict.keys()): if tx*N+N < x or ty*N+N < y or tx*N > x+w or ty*N > y+h: trimmed.append((tx, ty)) self.tiledict.pop((tx, ty)) self._mark_mipmap_dirty(tx, ty) elif (tx*N < x and x < tx*N+N or ty*N < y and y < ty*N+N or tx*N < x+w and x+w < tx*N+N or ty*N < y+h and y+h < ty*N+N): trimmed.append((tx, ty)) with self.tile_request(tx, ty, readonly=False) as rgba: if tx*N < x and x < tx*N+N: rgba[:, 0:(x - tx*N), :] = 0 # Clear left edge if ty*N < y and y < ty*N+N: rgba[0:(y - ty*N), :, :] = 0 # Clear top edge if tx*N < x+w and x+w < tx*N+N: # This slice is [N-1-c for c in range(tx*N+N - (x+w))]. rgba[:, (x+w - tx*N):N, :] = 0 # Clear right edge if ty*N < y+h and y+h < ty*N+N: # This slice is [N-1-r for r in range(ty*N+N - (y+h))]. 
rgba[(y+h - ty*N):N, :, :] = 0 # Clear bottom edge self._mark_mipmap_dirty(tx, ty) self.notify_observers(*lib.surface.get_tiles_bbox(trimmed)) @contextlib.contextmanager def tile_request(self, tx, ty, readonly): """Get a tile as a NumPy array, then put it back :param int tx: Tile X coord (multiply by TILE_SIZE for pixels) :param int ty: Tile Y coord (multiply by TILE_SIZE for pixels) :param bool readonly: get a read-only tile Context manager that fetches a tile as a NumPy array, and then puts the potentially modified tile back into the tile backing store. To be used with the 'with' statement. Read/write tile requests on empty slots get you a new writeable tile:: >>> surf = MyPaintSurface() >>> with surf.tile_request(1, 2, readonly=False) as t1: ... t1[...] = (1<<15) >>> with surf.tile_request(1, 2, readonly=False) as t2: ... assert t2 is t1 ... assert (t2 == t1).all() Read-only tile requests on empty addresses yield the special transparent tile, which is marked as read-only:: >>> with surf.tile_request(666, 666, readonly=True) as tr: ... assert tr is transparent_tile.rgba Snapshotting a surface makes all its tiles read-only as a side effect, so the next read/write tile request will yield a copy for you to work on:: >>> sshot = surf.save_snapshot() >>> with surf.tile_request(1, 2, readonly=True) as t3: ... assert t3 is t1 ... assert (t3 == t1).all() >>> with surf.tile_request(1, 2, readonly=False) as t4: ... assert t4 is not t1 ... assert (t4 == t1).all() """ numpy_tile = self.
repo_name: ksmit799/Toontown-Source | path: toontown/ai/DistributedScavengerHuntTarget.py | language: Python | license: mit | size: 1,518 | score: 0.001976
from direct.dir
ectnotify import DirectNotifyGlobal from direct.distributed import DistributedObject from otp.speedchat import SpeedChatGlobals class DistributedScavengerHuntTarget(DistributedObject.DistributedObject): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedScavengerHuntTarget') def __init__(self, cr): DistributedObject.DistributedObject.__init__(self, cr) def setupListenerDetails(self):
self.triggered = False self.triggerDelay = 15 self.accept(SpeedChatGlobals.SCCustomMsgEvent, self.phraseSaid) def phraseSaid(self, phraseId): self.notify.debug('Checking if phrase was said') helpPhrase = 10003 def reset(): self.triggered = False if phraseId == helpPhrase and not self.triggered: self.triggered = True self.attemptScavengerHunt() taskMgr.doMethodLater(self.triggerDelay, reset, 'ScavengerHunt-phrase-reset', extraArgs=[]) def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) DistributedScavengerHuntTarget.notify.debug('announceGenerate') self.setupListenerDetails() def delete(self): self.ignoreAll() taskMgr.remove('ScavengerHunt-phrase-reset') DistributedObject.DistributedObject.delete(self) def attemptScavengerHunt(self): DistributedScavengerHuntTarget.notify.debug('attempScavengerHunt') self.sendUpdate('attemptScavengerHunt', [])
repo_name: nextgis/quickmapservices | path: src/ds_edit_dialog.py | language: Python | license: gpl-2.0 | size: 10,803 | score: 0.001574
from __fut
ure__ import absolute_import import os import shutil from qgis.PyQt import uic from qgis.PyQt.QtGui import QIcon, Q
Pixmap from qgis.PyQt.QtWidgets import QDialog, QMessageBox from os import path from . import extra_sources from .data_source_info import DataSourceInfo from .data_source_serializer import DataSourceSerializer from .data_sources_list import DataSourcesList from .group_info import GroupInfo from .groups_list import GroupsList from .supported_drivers import KNOWN_DRIVERS from .gui.editor_widget_gdal import EditorWidgetGdal from .gui.editor_widget_tms import EditorWidgetTms from .gui.editor_widget_wms import EditorWidgetWms from .gui.editor_widget_wfs import EditorWidgetWfs from .gui.editor_widget_geojson import EditorWidgetGeoJson from .gui.line_edit_color_validator import LineEditColorValidator from .plugin_settings import PluginSettings from .compat2qgis import getOpenFileName FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'ds_edit_dialog.ui')) def is_same(file1, file2): return os.path.normcase(os.path.normpath(file1)) == \ os.path.normcase(os.path.normpath(file2)) class DsEditDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(DsEditDialog, self).__init__(parent) self.setupUi(self) self.DRV_WIDGETS = { KNOWN_DRIVERS.GDAL: EditorWidgetGdal(), KNOWN_DRIVERS.TMS: EditorWidgetTms(), KNOWN_DRIVERS.WMS: EditorWidgetWms(), KNOWN_DRIVERS.WFS: EditorWidgetWfs(), KNOWN_DRIVERS.GEOJSON: EditorWidgetGeoJson(), } # init icon selector # self.txtIcon.set_dialog_ext(self.tr('Icons (*.ico *.jpg *.jpeg *.png *.svg);;All files (*.*)')) # self.txtIcon.set_dialog_title(self.tr('Select icon for data source')) self.iconChooseButton.clicked.connect(self.choose_icon) # init combos self.init_groups_cmb() self.init_types_cmb() self.change_spec_tab() # validators self.id_validator = LineEditColorValidator(self.txtId, '^[A-Za-z0-9_]+$', error_tooltip=self.tr('Any text')) self.alias_validator = LineEditColorValidator(self.txtAlias, '^[A-Za-z0-9_ ]+$', error_tooltip=self.tr('Any text')) # events self.cmbType.currentIndexChanged.connect(self.change_spec_tab) # vars self.ds_info = None self.init_with_existing = False self._editor_tab = None self.set_icon( os.path.join( os.path.dirname(__file__), 'icons', 'mapservices.png' ) ) def init_groups_cmb(self): ds_groups = GroupsList() for ds_group in ds_groups.groups.values(): self.cmbGroup.addItem(QIcon(ds_group.icon), self.tr(ds_group.alias), ds_group) def init_types_cmb(self): for drv in KNOWN_DRIVERS.ALL_DRIVERS: self.cmbType.addItem(drv, drv) def change_spec_tab(self, index=0): # remove old widget self.tabWidget.removeTab(2) # bad! 
drv = self.cmbType.itemData(self.cmbType.currentIndex()) self.tabWidget.addTab(self.DRV_WIDGETS[drv], drv) def set_ds_info(self, ds_info): self.ds_info = ds_info self.init_with_existing = True # feel fields self.feel_common_fields() self.feel_specific_fields() def fill_ds_info(self, ds_info): self.ds_info = ds_info self.init_with_existing = False # feel fields self.feel_common_fields() self.feel_specific_fields() def choose_icon(self): icon_path = getOpenFileName( self, self.tr('Select icon for data source'), PluginSettings.get_default_user_icon_path(), self.tr('Icons (*.ico *.jpg *.jpeg *.png *.svg);;All files (*.*)') ) if icon_path != "": PluginSettings.set_default_user_icon_path(icon_path) self.set_icon(icon_path) def set_icon(self, icon_path): self.__ds_icon = icon_path self.iconPreview.setPixmap( QPixmap(self.__ds_icon) ) def feel_common_fields(self): self.txtId.setText(self.ds_info.id) self.txtAlias.setText(self.ds_info.alias) # self.txtIcon.set_path(self.ds_info.icon_path) self.set_icon(self.ds_info.icon_path) # license self.txtLicense.setText(self.ds_info.lic_name) self.txtLicenseLink.setText(self.ds_info.lic_link) self.txtCopyrightText.setText(self.ds_info.copyright_text) self.txtCopyrightLink.setText(self.ds_info.copyright_link) self.txtTermsOfUse.setText(self.ds_info.terms_of_use) # set group group_index = None for i in range(self.cmbGroup.count()): if self.cmbGroup.itemData(i).id == self.ds_info.group: group_index = i break if group_index is not None: self.cmbGroup.setCurrentIndex(i) else: non_ex_group = GroupInfo(group_id=self.ds_info.group) self.cmbGroup.addItem(self.ds_info.group, non_ex_group) self.cmbGroup.setCurrentIndex(self.cmbGroup.count()-1) def feel_specific_fields(self): # set type self.cmbType.setCurrentIndex(self.cmbType.findData(self.ds_info.type)) # feel widgets for spec_widget in self.DRV_WIDGETS.values(): spec_widget.feel_form(self.ds_info) def accept(self): new_ds_info = DataSourceInfo() self.feel_ds_info(new_ds_info) if not self.validate(new_ds_info): return if self.init_with_existing: res = self.save_existing(new_ds_info) else: res = self.create_new(new_ds_info) if res: super(DsEditDialog, self).accept() def save_existing(self, ds_info): if ds_info.id != self.ds_info.id and not self.check_existing_id(ds_info.id): return False if ds_info == self.ds_info: return True # replace icon if need if not is_same(ds_info.icon_path, self.ds_info.icon_path): os.remove(self.ds_info.icon_path) dir_path = os.path.dirname(self.ds_info.file_path) ico_file_name = path.basename(ds_info.icon_path) ico_path = path.join(dir_path, ico_file_name) shutil.copy(ds_info.icon_path, ico_path) # replace gdal_conf if need if ds_info.type == KNOWN_DRIVERS.GDAL: def copy_new_gdal_file(): dir_path = os.path.dirname(self.ds_info.file_path) gdal_file_name = path.basename(ds_info.gdal_source_file) gdal_file_path = path.join(dir_path, gdal_file_name) shutil.copy(ds_info.gdal_source_file, gdal_file_path) # old ds = gdal if self.ds_info.type == KNOWN_DRIVERS.GDAL: if ds_info.gdal_source_file != self.ds_info.gdal_source_file: os.remove(self.ds_info.icon_path) copy_new_gdal_file() else: copy_new_gdal_file() # write config DataSourceSerializer.write_to_ini(ds_info, self.ds_info.file_path) return True def create_new(self, ds_info): if not self.check_existing_id(ds_info.id): return False # set paths dir_path = path.join(extra_sources.USER_DIR_PATH, extra_sources.DATA_SOURCES_DIR_NAME, ds_info.id) if path.exists(dir_path): salt = 0 while path.exists(dir_path + str(salt)): salt += 1 dir_path += str(salt) 
ini_path = path.join(dir_path, 'metadata.ini') ico_path = path.join(dir_path, ds_info.icon) # create dir os.mkdir(dir_path) # copy icon shutil.copy(ds_info.icon_path, ico_path) if ds_info.type == KNOWN_DRIVERS.GDAL: # copy gdal file gdal_file_name = path.basename(ds_info.gdal_source_file) gdal_file_path = path.join(dir_path,
repo_name: yongwen/makahiki | path: makahiki/apps/managers/log_mgr/admin.py | language: Python | license: mit | size: 1,266 | score: 0.00237
"""log model admin.""" from django.contrib import admin from django.db import models from django.forms.widgets import TextInput from apps.managers.challenge_mgr import challenge_mgr from apps.managers.log_mgr.models import MakahikiLog from apps.admin.admin import challenge_designe
r_site, challenge_manager_site, developer_site class MakahikiLogAdmin(admin.ModelAdmin): """admin""" list_display = ('request_url', "remote_user", 'remote_ip', 'request_time', 'request_method', 'response_status') list_filter = ('response_status', 'remote_user') search_fields = ('request_url', 'remote_ip')
ordering = ["-request_time"] date_hierarchy = "request_time" formfield_overrides = { models.CharField: {'widget': TextInput(attrs={'size': '100'})}, } def has_add_permission(self, request): return False admin.site.register(MakahikiLog, MakahikiLogAdmin) challenge_designer_site.register(MakahikiLog, MakahikiLogAdmin) challenge_manager_site.register(MakahikiLog, MakahikiLogAdmin) developer_site.register(MakahikiLog, MakahikiLogAdmin) challenge_mgr.register_admin_challenge_info_model("Status", 1, MakahikiLog, 1) challenge_mgr.register_developer_challenge_info_model("Status", 4, MakahikiLog, 1)
repo_name: lerouxb/ni | path: actions/base.py | language: Python | license: mit | size: 6,191 | score: 0.002746
from ni.core.selection import Selection from ni.core.text import char_pos_to_tab_pos from ni.core.document import InsertDelta, DeleteDelta class Action(object): """Base class for all view actions.""" def __init__(self, view): self.grouped = False self.editor = view.editor self.view = view def execute(self): raise NotImplementedError class MoveCursorAction(Action): """Base class for all actions that involve moving the cursor around.""" def __init__(self, view, is_select=False): super(MoveCursorAction, self).__init__(view) self.is_select = is_select def execute(self): view = self.view doc = view.document original_position = view.cursor_pos original_scroll = view.scroll_pos self.move() if original_position != view.cursor_pos or \ original_scroll != view.scroll_pos: view.invalidate() if self.is_select: if view.selection: end_offset = doc.cursor_pos_to_offset(view.cursor_pos) view.selection.end = end_offset else: start_offset = doc.cursor_pos_to_offset(original_position) end_offset = doc.cursor_pos_to_offset(view.cursor_pos) #print original_position, view.cursor_pos, start_offset, end_offset view.selection = Selection(doc, start_offset, end_offset) def move(self): raise NotImplementedError class EditAction(Action): """Base class for all undoable actions.""" def __init__(self, view): super(EditAction, self).__init__(view) self.before_cursor_pos = None self.before_last_x_pos = None self.before_scroll_pos = None self.after_cursor_pos = None self.after_last_x_pos = None self.after_scroll_pos = None self.deltas = [] self.is_executed = False def execute(self): """ Save positions so that we can return later and call self.do(). """ self.is_executed = True view = self.view # for undo purposes self.before_cursor_pos = view.cursor_pos self.before_last_x_pos = view.last_x_pos self.before_scroll_pos = view.scroll_pos self.do() # recalculate last_x_pos based on where the cursor is now doc = view.document y, x = view.cursor_pos line = doc.get_line(y) view.last_x_pos = char_pos_to_tab_pos(line, x, doc.tab_size) # for redo purposes self.after_cursor_pos = view.cursor_pos self.after_last_x_pos = view.last_x_pos self.after_scroll_pos = view.scroll_pos view.invalidate() def delete_selection(self): """ Common code for deleting a selection used by many edit actions. """ view = self.view doc = view.document # delete the selection selection = view.selection.get_normalised() d = DeleteDelta(doc, selection.start, selection.end-selection.start+1) d.do() self.deltas.append(d) view.selection = None # move the cursor (insert point) to the start of where the selection # was before we deleted it view.cursor_pos = doc.offset_to_cursor_pos(selection.start) def do(self): """ Subclasses should implement this. """ raise NotImplementedError def undo(self): if not self.is_executed: raise RuntimeError("Not executed") for d in reversed(self.deltas): d.undo() # reset the cursor and scroll positions to where it was self.view.cursor_pos = self.before_cursor_pos self.view.last_x_pos = self.before_last_x_pos self.view.scroll_pos = self.before_scroll_pos self.view.invalidate() def redo(self): if not self.is_executed: raise RuntimeError("Not executed") for d in self.deltas: d.do() # reset the cursor and scroll positions to where it was self.view.cursor_pos
= self.after_cursor_pos self.view.last_x_pos = self.after_last_x_pos self.view.scroll_pos = self.after_
scroll_pos self.view.invalidate() class ToggleComment(EditAction): def __init__(self, view, comment_string): self.comment_string = comment_string super(ToggleComment, self).__init__(view) def do(self): view = self.view doc = view.document settings = self.editor.settings if view.selection: selection = view.selection.get_normalised() from_line = doc.offset_to_cursor_pos(selection.start)[0] to_line = doc.offset_to_cursor_pos(selection.end)[0] else: from_line = view.cursor_pos[0] to_line = from_line for y in xrange(from_line, to_line+1): line = doc.get_line(y) offset = doc.cursor_pos_to_offset((y, 0)) if line[:len(self.comment_string)] == self.comment_string: d = DeleteDelta(doc, offset, len(self.comment_string)) else: d = InsertDelta(doc, offset, self.comment_string) d.do() self.deltas.append(d) # move the cursor if necessary y, x = view.cursor_pos line = doc.get_line(y) if line[:len(self.comment_string)] == self.comment_string: # we added comment_string, so increase cursor pos if x != 0: x += len(self.comment_string) if x > len(line): x = len(line) view.cursor_pos = (y, x) else: # we removed comment_string, so decrease cursor pos x -= len(self.comment_string) if x < 0: x = 0 view.cursor_pos = (y, x) # not sure how best to grow/shrink the selection right now, # so just destroying it for now view.selection = None
repo_name: wylieswanson/mythutils | path: mythutils_recfail_alarm/setup.py | language: Python | license: gpl-3.0 | size: 441 | score: 0.054422
#!/usr/bin/env python from glob import glob from distutils.core import setup setup( name="mythutils_recfail_alarm", version="1.0", description="Autoamtically notify on Recorder Failed via Prowl service", author="Wylie Swanson", author_email="wylie@pingzero.net", url="http://www.pingzero.net", scripts=glob("bin/*"), data_
files=[ ( '/etc/mythutils/', glob('etc/mythutils/*') ), (
'/etc/cron.d/', glob('etc/cron.d/*') ), ] )
repo_name: nesi/easybuild-framework | path: easybuild/toolchains/xlmvapich2.py | language: Python | license: gpl-2.0 | size: 573 | score: 0.001745
## # You should have received a copy of the GNU General Public License # along w
ith EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBui
ld support for xlmpich compiler toolchain (includes IBM XL compilers (xlc, xlf) and MPICH). @author: Jack Perdue <j-perdue@tamu.edu> - TAMU HPRC - http://sc.tamu.edu """ from easybuild.toolchains.compiler.ibmxl import IBMXL from easybuild.toolchains.mpi.mvapich2 import Mvapich2 class Xlompi(IBMXL, Mvapich2): """ Compiler toolchain with IBM XL compilers (xlc/xlf) and MPICH. """ NAME = 'xlmvapich2'
repo_name: CIGNo-project/CIGNo | path: cigno/metadata/admin.py | language: Python | license: gpl-3.0 | size: 16,379 | score: 0.01044
#from django.contrib import admin from django.contrib.gis import admin from modeltranslation.admin import TranslationAdmin, TranslationTabularInline from django.contrib.contenttypes.generic import GenericTabularInline from cigno.mdtools.models import Connection from django.utils.translation import ugettext_lazy as _ from geonode.core.models import UserObjectRoleMapping from django.http import HttpResponseRedirect from models import * # riferimento per resize-fields-in-django-admin # http://stackoverflow.com/questions/910169/resize-fields-in-django-admin translation_js = ( '/static/modeltranslation/js/force_jquery.js', 'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js', '/static/modeltranslation/js/tabbed_translation_fields.js', ) translation_css = { 'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',), } class ConnectionInline(GenericTabularInline): model = Connection ct_field = 'o_content_type' ct_fk_field = 'o_object_id' class InverseConnectionInline(GenericTabularInline): model = Connection ct_field = 'd_content_type' ct_fk_field = 'd_object_id' class OnlineResourceInline(admin.TabularInline): model = OnlineResource classes = ('collapse closed',) class TemporalExtentInline(admin.TabularInline): model = TemporalExtent classes = ('collapse closed',) extra = 1 class ResourceTemporalExtentInline(admin.TabularInline): model = ResourceTemporalExtent classes = ('collapse closed',) class ReferenceDateInline(admin.TabularInline): model = ReferenceDate classes = ('collapse closed',) extra = 1 class ResourceReferenceDateInline(admin.TabularInline): model = ResourceReferenceDate classes = ('collapse closed',) extra = 1 class ConformityInline(admin.TabularInline): model = Conformity classes = ('collapse closed',) extra = 1 class ResourceConformityInline(admin.TabularInline): model = ResourceConformity classes = ('collapse closed',) extra = 1 class ResponsiblePartyRoleInline(admin.TabularInline): model = ResponsiblePartyRole classes = ('collapse closed',) extra = 1 class ResourceResponsiblePartyRoleInline(admin.TabularInline): model = ResourceResponsiblePartyRole classes = ('collapse closed',) extra = 1 class MdResponsiblePartyRoleInline(admin.TabularInline): model = MdResponsiblePartyRole #exclude = ('role',) readonly_fields = ('role',) classes = ('collapse closed',) extra = 1 class ResourceMdResponsiblePartyRoleInline(admin.TabularInline): model = ResourceMdResponsiblePartyRole #exclude = ('role',) readonly_fields = ('role',) classes = ('collapse closed',) extra = 1 class BaseCodeAdmin(TranslationAdmin): list_editable = ['label',] list_display = ['id', 'label'] class Media: js = translation_js
css = translation_css class BaseCodeIsoAdmin(TranslationAdmin): list_editable = ['label','isoid'] list_display = ['id', 'label', 'isoid'] class Media: js = translation_js css = translation_css class CodeRefSysAdmin(TranslationAdm
in): list_editable = ['label', 'srid'] list_display = ['id', 'label', 'srid'] class Media: js = translation_js css = translation_css class CodeLicenseAdmin(TranslationAdmin): list_editable = ['label', 'abstract'] list_display = ['id', 'label', 'abstract'] class Media: js = translation_js css = translation_css class CodeDistributionFormatAdmin(TranslationAdmin): list_editable = ['format','label', 'version', 'mimetype', 'ordering'] list_display = ['id', 'format', 'label', 'version', 'mimetype', 'ordering'] class Media: js = translation_js css = translation_css class ResponsiblePartyAdmin(TranslationAdmin): # list_editable = ['label', 'version', 'ordering'] # list_display = ['id', 'label', 'version', 'ordering'] class Media: js = translation_js css = translation_css class LayerExtAdmin(TranslationAdmin): # row-level permissions # http://www.ibm.com/developerworks/opensource/library/os-django-admin/index.html def queryset(self, request): qs = super(LayerExtAdmin, self).queryset(request) if request.user.is_superuser: return qs return qs.filter(id__in = UserObjectRoleMapping.objects.filter(user=request.user, role__codename__in =('layer_readwrite','layer_admin') ).values_list('object_id',flat=True) ) list_display = ('titleml',) inlines = [ # OnlineResourceInline, TemporalExtentInline, ReferenceDateInline, ConformityInline, ResponsiblePartyRoleInline, MdResponsiblePartyRoleInline, # ConnectionInline, # InverseConnectionInline, ] #raw_id_fields = ("parent_identifier",) filter_horizontal = ['presentation_form','spatial_representation_type_ext','topic_category_ext','responsible_party_role','distribution_format','md_responsible_party_role'] # filter_horizontal #readonly_fields = ['uuid', 'geographic_bounding_box'] # readonly_fields = ['uuid', 'md_uuid', 'geographic_bounding_box', 'md_standard_name', 'md_version_name', 'md_character_set'] search_fields = ['titleml', 'abstractml'] search_fields_verbose = ['Titolo', 'Descrizione'] #GRAPPELLI list_filter = ('resource_type', 'spatial_representation_type_ext', 'topic_category', 'distribution_format') list_display = ('id', 'titleml', 'inspire', 'completeness_bar') fieldsets = ( (_('Metadata'), { 'classes': ('collapse closed',), 'fields': ( 'md_uuid', #'lingua_metadata', 'md_date_stamp', ('md_character_set', 'md_standard_name', 'md_version_name') ) }), (_('Identification'), { 'classes': ('collapse closed',), 'fields': ( 'titleml', 'abstractml', # 'source_document', # override by resources connections #'resource_type', 'parent_identifier', 'other_citation_details', 'other_citation_details', 'presentation_form', 'distribution_format' ) }), (_('Identification2'), { 'classes': ('collapse closed',), 'fields': ( ('resource_type', 'uuid'), ('language', 'character_set'), 'supplemental_information_ml', 'update_frequency', 'spatial_representation_type_ext' ) }), (_('Responsible Party'), { 'classes': ('collapse closed',), 'fields': [] }), (_('Classification e Keywords'), { 'classes': ('collapse closed',), 'fields': ( 'inspire', 'topic_category_ext', 'gemetkeywords' ) }), (_('Geographic extent'), { 'classes': ('collapse',), 'fields': ( ('ref_sys', 'geographic_bounding_box'), #'geo', ('vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent') ) }), (_('Temporal extent'), { 'classes': ('collapse',), 'fields': [] }), (_('DataQuality'), { 'classes': ('collapse closed',), 'fields': ( 'lineage', ('equivalent_scale', 'distance', 'uom_distance') ) }), (_('Conformity'), { 'classes': ('collapse closed',), 'fields': []
repo_name: hachreak/invenio-previewer | path: invenio_previewer/utils.py | language: Python | license: gpl-2.0 | size: 1,963 | score: 0
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2016 CERN. # # Invenio is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that
it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # M
ERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Invenio Previewer Utilities.""" import cchardet from flask import current_app def detect_encoding(fp, default=None): """Detect the cahracter encoding of a file. :param fp: Open Python file pointer. :param default: Fallback encoding to use. :returns: The detected encoding. .. note:: The file pointer is returned at its original read position. """ init_pos = fp.tell() try: sample = fp.read( current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024)) # Result contains 'confidence' and 'encoding' result = cchardet.detect(sample) threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9) if result.get('confidence', 0) > threshold: return result.get('encoding', default) else: return default except Exception: current_app.logger.warning('Encoding detection failed.', exc_info=True) return default finally: fp.seek(init_pos)
repo_name: konradxyz/cloudify-manager | path: rest-service/manager_rest/test/test_provider_context.py | language: Python | license: apache-2.0 | size: 2,268 | score: 0
######### # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. from cloudify_rest_client import exceptions from base_test import BaseServerTestCase class ProviderContextTestCase(BaseServerTestCase): def test_post_provider_context(self): result = self.post('/provider/context', data={ 'name': 'test_provider', 'context': {'key': 'value'} }) self.assertEqual(result.status_code, 201)
self.assertEqual(result.json['status'], 'ok') def
test_get_provider_context(self): self.test_post_provider_context() result = self.get('/provider/context').json self.assertEqual(result['context']['key'], 'value') self.assertEqual(result['name'], 'test_provider') def test_post_provider_context_twice_fails(self): self.test_post_provider_context() self.assertRaises(self.failureException, self.test_post_provider_context) def test_update_provider_context(self): self.test_post_provider_context() new_context = {'key': 'new-value'} self.client.manager.update_context( 'test_provider', new_context) context = self.client.manager.get_context() self.assertEqual(context['context'], new_context) def test_update_empty_provider_context(self): try: self.client.manager.update_context( 'test_provider', {'key': 'value'}) self.fail('Expected failure due to existing context') except exceptions.CloudifyClientError as e: self.assertEqual(e.status_code, 404) self.assertEqual(e.message, 'Provider Context not found')
repo_name: Fokko/incubator-airflow | path: tests/gcp/hooks/test_text_to_speech.py | language: Python | license: apache-2.0 | size: 2,693 | score: 0.001857
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the L
icense is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import unittest from airflow.gcp.hooks.text_to_speech import CloudTextT
oSpeechHook from tests.compat import PropertyMock, patch from tests.gcp.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id INPUT = {"text": "test text"} VOICE = {"language_code": "en-US", "ssml_gender": "FEMALE"} AUDIO_CONFIG = {"audio_encoding": "MP3"} class TestTextToSpeechHook(unittest.TestCase): def setUp(self): with patch( "airflow.gcp.hooks.base.CloudBaseHook.__init__", new=mock_base_gcp_hook_default_project_id, ): self.gcp_text_to_speech_hook = CloudTextToSpeechHook(gcp_conn_id="test") @patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook.client_info", new_callable=PropertyMock) @patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook._get_credentials") @patch("airflow.gcp.hooks.text_to_speech.TextToSpeechClient") def test_text_to_speech_client_creation(self, mock_client, mock_get_creds, mock_client_info): result = self.gcp_text_to_speech_hook.get_conn() mock_client.assert_called_once_with( credentials=mock_get_creds.return_value, client_info=mock_client_info.return_value ) self.assertEqual(mock_client.return_value, result) self.assertEqual(self.gcp_text_to_speech_hook._client, result) @patch("airflow.gcp.hooks.text_to_speech.CloudTextToSpeechHook.get_conn") def test_synthesize_speech(self, get_conn): synthesize_method = get_conn.return_value.synthesize_speech synthesize_method.return_value = None self.gcp_text_to_speech_hook.synthesize_speech( input_data=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG ) synthesize_method.assert_called_once_with( input_=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG, retry=None, timeout=None )
repo_name: 3lnc/elasticsearch-dsl-py | path: test_elasticsearch_dsl/test_integration/test_examples/test_completion.py | language: Python | license: apache-2.0 | size: 518 | score: 0.001938
# -*- coding: utf-8 -*- from __future__ import unicode_literals from .completion import Person def test_person_suggests_on_all_variants_of_name(write_client): Person.init(using=write_client) Person(name='Honza Král', popularity=42).save(refresh=True) s = Person.search().suggest('t', 'kra', completion={'field': 'suggest'}) resp
onse = s.exe
cute() opts = response.suggest.t[0].options assert 1 == len(opts) assert opts[0]._score == 42 assert opts[0]._source.name == 'Honza Král'
repo_name: napjon/moocs_solution | path: robotics-udacity/2.4.py | language: Python | license: mit | size: 465 | score: 0.017204
#this module here is to compute the formula to calculate the new means and #new variance. def update(mean1, var1, mean2, var2): new_mean = ((mean1 * var2) + (mean2*var1))/(var1 + var2) new_var = 1/(1/var1 + 1/var2) return [new_mean, new_var] def predict(mean1, var1, mean2, var2): new_mean = mean1 + mean2 new_var = var1 + var2
return [new_mean, new_var] print 'upda
te : 'update(10.,4., 12.,4.) print predict(10.,4., 12.,4.)
repo_name: nicoddemus/pytest | path: src/_pytest/logging.py | language: Python | license: mit | size: 29,805 | score: 0.001309
"""Access and control log capturing.""" import logging import os import re import sys from contextlib import contextmanager from io import StringIO from pathlib import Path from typing import AbstractSet from typing import Dict from typing import Generator from typing import List from typing import Mapping from typing import Optional from typing import Tuple from typing import TypeVar from typing import Union from _pytest import nodes from _pytest._io import TerminalWriter from _pytest.capture import CaptureManager from _pytest.compat import final from _pytest.compat import nullcontext from _pytest.config import _strtobool from _pytest.config import Config from _pytest.config import create_terminal_writer from _pytest.config import hookimpl from _pytest.config import UsageError from _pytest.config.argparsing import Parser from _pytest.deprecated import check_ispytest from _pytest.fixtures import fixture from _pytest.fixtures import FixtureRequest from _pytest.main import Session from _pytest.store import StoreKey from _pytest.terminal import TerminalReporter DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" _ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") caplog_handler_key = StoreKey["LogCaptureHandler"]() caplog_records_key = StoreKey[Dict[str, List[logging.LogRecord]]]() def _remove_ansi_escape_sequences(text: str) -> str: return _ANSI_ESCAPE_SEQ.sub("", text) class ColoredLevelFormatter(logging.Formatter): """A logging formatter which colorizes the %(levelname)..s part of the log format passed to __init__.""" LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { logging.CRITICAL: {"red"}, logging.ERROR: {"red", "bold"}, logging.WARNING: {"yellow"}, logging.WARN: {"yellow"}, logging.INFO: {"green"}, logging.DEBUG: {"purple"}, logging.NOTSET: set(), } LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*s)") def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._original_fmt = self._style._fmt self._level_to_fmt_mapping: Dict[int, str] = {} assert self._fmt is not None levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) if not levelname_fmt_match: return levelname_fmt = levelname_fmt_match.group() for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): formatted_levelname = levelname_fmt % { "levelname": logging.getLevelName(level) } # add ANSI escape sequences around the formatted levelname color_kwargs = {name: True for name in color_opts} colorized_formatted_levelname = terminalwriter.markup( formatted_levelname, **color_kwargs ) self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( colorized_formatted_levelname, self._fmt ) def format(self, record: logging.LogRecord) -> str: fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) self._style._fmt = fmt return super().format(record) class PercentStyleMultiline(logging.PercentStyle): """A logging style with special support for multiline messages. If the message of a record consists of multiple lines, this style formats the message as if each line were logged separately. 
""" def __init__(self, fmt: str, auto_indent: Union[int, str, bool, None]) -> None: super().__init__(fmt) self._auto_indent = self._get_auto_indent(auto_indent) @staticmethod def _update_message( record_dict: Dict[str, object], message: str ) -> Dict[str, object]: tmp = record_dict.copy() tmp["message"] = message return tmp @staticmethod def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int: """Determine the current auto indentation setting. Specify auto indent behavior (on/off/fixed) by passing in extra={"auto_indent": [value]} to the call to logging.log() or using a --log-auto-indent [value] command line or the log_auto_indent [value] config option. Default behavior is auto-indent off. Using the string "True" or "on" or the boolean True as the value turns auto indent on, using the string "False" or "off" or the boolean False or the int 0 turns it off, and specifying a positive integer fixes the indentation position to the value specified. Any other values for the option are invalid, and will silently be converted to the default. :param None|bool|int|str auto_indent_option: User specified option for indentation from command line, config or extra kwarg. Accepts int, bool or str. str option accepts the same range of values as boolean config options, as well as positive integers represented in str form. :returns: Indentation value, which can be -1 (automatically determine indentation) or 0 (auto-indent turned off) or >0 (explicitly set indentation position). """ if auto_indent_option is None: return 0 elif isinstance(auto_indent_option, bool): if auto_indent_option: return -1 else: return 0 elif isinstance(auto_indent_option, int): return int(auto_indent_option) elif isinstance(auto_indent_option, str): try: return int(auto_indent_option) except ValueError: pass try: if _strtobool(auto_indent_option): return -1 except ValueError: return 0 return 0 def format(self, record: logging.LogRecord) -> str: if "\n" in record.message: if hasattr(record, "auto_indent"): # Passed in from the "extra={}" kwarg on the call to logging.log(). auto_indent = self._get_auto_indent(record.auto_indent) # type: ignore[attr-defined] else: auto_indent = self._auto_indent if auto_indent: lines = record.message.splitlines() formatted = self._fmt % self._update_message(record.__dict__, lines[0]) if auto_indent < 0: indentation = _remove_ansi_escape_sequences(formatted).find( lines[0] ) else: # Optimizes logging by allowing a fixed indentation. indentation = auto_indent lines[0] = formatted return ("\n" + " " * indentation).join(lines) return self._fmt % record.__dict__ def get_option_ini(config: Config, *names: str): for name in names: ret = config.getoption(name) # 'default' arg won't work as expected if ret is None: ret = config.getini(name)
if ret: return ret def pytest_addoption(parser: Parser) -> None: """Add options to control lo
g capturing.""" group = parser.getgroup("logging") def add_option_ini(option, dest, default=None, type=None, **kwargs): parser.addini( dest, default=default, type=type, help="default value for " + option ) group.addoption(option, dest=dest, **kwargs) add_option_ini( "--log-level", dest="log_level", default=None, metavar="LEVEL", help=( "level of messages to catch/display.\n" "Not set by default, so it depends on the root/parent log handler's" ' effective level, where it is "WARNING" by default.' ), ) add_option_ini( "--log-format", dest="log_format", default=DEFAULT_LOG_FORMAT, help="log format as used by the logging module.", ) add_option_ini( "--l
repo_name: rivimey/rwmapmaker | path: zziplib/docs/zzipdoc/htm2dbk.py | language: Python | license: gpl-3.0 | size: 7,044 | score: 0.017888
#! /usr/bin/env python """ this file converts simple html text into a docbook xml variant. The mapping of markups and links is far from perfect. But all we want is the docbook-to-pdf converter and similar technology being present in the world of docbook-to-anything converters. """ from datetime import date import match import sys m = match.Match class htm2dbk_conversion_base: regexlist = [ m()("</[hH]2>(.*)", "m") >> "</title>\n<subtitle>\\1</subtitle>", m()("<[hH]2>") >> "<sect1 id=\"--filename--\"><title>", m()("<[Pp]([> ])","m") >> "<para\\1", m()("</[Pp]>") >> "</para>", m()("<(pre|PRE)>") >> "<screen>", m()("</(pre|PRE)>") >> "</screen>", m()("<[hH]3>") >> "<sect2><title>", m()("</[hH]3>((?:.(?!<sect2>))*.?)", "s") >> "</title>\\1</sect2>", m()("<!doctype [^<>]*>","s") >> "", m()("<!DOCTYPE [^<>]*>","s") >> "", m()("(<\w+\b[^<>]*\swidth=)(\d+\%)","s") >> "\\1\"\\2\"", m()("(<\w+\b[^<>]*\s\w+=)(\d+)","s") >> "\\1\"\\2\"", m()("&&") >> "\&amp\;\&amp\;", m()("\$\<") >> "\$\&lt\;", m()("&(\w+[\),])") >> "\&amp\;\\1", m()("(</?)span(\s[^<>]*)?>","s") >> "\\1phrase\\2>", m()("(</?)small(\s[^<>]*)?>","s") >> "\\1note\\2>", m()("(</?)(b|em|i)>")>> "\\1emphasis>", m()("(</?)(li)>") >> "\\1listitem>", m()("(</?)(ul)>") >> "\\1itemizedlis
t>", m()("(</?)(ol)>") >> "\\1orderedlist>", m()("(</?)(dl)>") >> "\\1variablelist>", m()("<dt\b([^<>]*)>","s") >> "<varlistentry\\1><term>", m()("</dt\b([^<>]*)>","s") >> "</term>", m()("<dd\b([^<>]*)>","s") >> "<listitem\\1>", m(
)("</dd\b([^<>]*)>","s") >> "</listitem></varlistentry>", m()("<table\b([^<>]*)>","s") >> "<informaltable\\1><tgroup cols=\"2\"><tbody>", m()("</table\b([^<>]*)>","s") >> "</tbody></tgroup></informaltable>", m()("(</?)tr(\s[^<>]*)?>","s") >> "\\1row\\2>", m()("(</?)td(\s[^<>]*)?>","s") >> "\\1entry\\2>", m()("<informaltable\b[^<>]*>\s*<tgroup\b[^<>]*>\s*<tbody>"+ "\s*<row\b[^<>]*>\s*<entry\b[^<>]*>\s*<informaltable\b","s") >> "<informaltable", m()("</informaltable>\s*</entry>\s*</row>"+ "\s*</tbody>\s*</tgroup>\s*</informaltable>", "s") >> "</informaltable>", m()("(<informaltable[^<>]*\swidth=\"100\%\")","s") >> "\\1 pgwide=\"1\"", m()("(<tbody>\s*<row[^<>]*>\s*<entry[^<>]*\s)(width=\"50\%\")","s") >> "<colspec colwidth=\"1*\" /><colspec colwidth=\"1*\" />\n\\1\\2", m()("<nobr>([\'\`]*)<tt>") >> "<cmdsynopsis>\\1", m()("</tt>([\'\`]*)</nobr>") >> "\\1</cmdsynopsis>", m()("<nobr><(?:tt|code)>([\`\"\'])") >> "<cmdsynopsis>\\1", m()("<(?:tt|code)><nobr>([\`\"\'])") >> "<cmdsynopsis>\\1", m()("([\`\"\'])</(?:tt|code)></nobr>") >> "\\1</cmdsynopsis>", m()("([\`\"\'])</nobr></(?:tt|code)>") >> "\\1</cmdsynopsis>", m()("(</?)tt>") >> "\\1constant>", m()("(</?)code>") >> "\\1literal>", m()(">([^<>]+)<br>","s") >> "><highlights>\\1</highlights>", m()("<br>") >> "<br />", # m()("<date>") >> "<sect1info><date>", # m()("</date>") >> "</date></sect1info>", m()("<reference>") >> "<reference id=\"reference\">" >> 1, m()("<a\s+href=\"((?:http|ftp|mailto):[^<>]+)\"\s*>((?:.(?!</a>))*.)</a>" ,"s") >> "<ulink url=\"\\1\">\\2</ulink>", m()("<a\s+href=\"zziplib.html\#([\w_]+)\"\s*>((?:.(?!</a>))*.)</a>","s") >> "<link linkend=\"$1\">$2</link>", m()("<a\s+href=\"(zziplib.html)\"\s*>((?:.(?!</a>))*.)</a>","s") >> "<link linkend=\"reference\">$2</link>", m()("<a\s+href=\"([\w-]+[.]html)\"\s*>((?:.(?!</a>))*.)</a>","s") >> "<link linkend=\"\\1\">\\2</link>", m()("<a\s+href=\"([\w-]+[.](?:h|c|am|txt))\"\s*>((?:.(?!</a>))*.)</a>" ,"s") >> "<ulink url=\"file:\\1\">\\2</ulink>", m()("<a\s+href=\"([A-Z0-9]+[.][A-Z0-9]+)\"\s*>((?:.(?!</a>))*.)</a>","s") >> "<ulink url=\"file:\\1\">\\2</ulink>" # m()("(</?)subtitle>") >> "\\1para>" # $_ .= "</sect1>" if /<sect1[> ]/ ] regexlist2 = [ m()(r"<br\s*/?>") >> "", m()(r"(</?)em>") >> r"\1emphasis>", m()(r"<code>") >> "<userinput>", m()(r"</code>") >> "</userinput>", m()(r"<link>") >> "<function>", m()(r"</link>") >> "</function>", m()(r"(?s)\s*</screen>") >> "</screen>", # m()(r"<ul>") >> "</para><programlisting>\n", # m()(r"</ul>") >> "</programlisting><para>", m()(r"<ul>") >> "<itemizedlist>", m()(r"</ul>") >> "</itemizedlist>", # m()(r"<li>") >> "", # m()(r"</li>") >> "" m()(r"<li>") >> "<listitem><para>", m()(r"</li>") >> "</para></listitem>\n", ] class htm2dbk_conversion(htm2dbk_conversion_base): def __init__(self): self.version = "" # str(date.today) self.filename = "." 
def convert(self,text): # $text txt = text.replace("<!--VERSION-->", self.version) for conv in self.regexlist: txt &= conv return txt.replace("--filename--", self.filename) def convert2(self,text): # $text txt = text.replace("<!--VERSION-->", self.version) for conv in self.regexlist: txt &= conv return txt class htm2dbk_document(htm2dbk_conversion): """ create document, add(text) and get the value() """ doctype = ( '<!DOCTYPE book PUBLIC "-//OASIS//DTD'+ ' DocBook XML V4.1.2//EN"'+"\n"+ ' "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd">'+ "\n") book_start = '<book><chapter><title>Documentation</title>'+"\n" book_end_chapters = '</chapter>'+"\n" book_end = '</book>'+"\n" def __init__(self): htm2dbk_conversion.__init__(self) self.text = self.doctype + self.book_start def add(self,text): if self.text & m()("<reference"): self.text += self.book_end_chapters ; self.book_end_chapters = "" self.text += self.convert(text).replace( "<br />","") & ( m()("<link>([^<>]*)</link>") >> "<function>\\1</function>") & ( m()("(?s)(<refentryinfo>\s*)<sect1info>" + "(<date>[^<>]*</date>)</sect1info>") >> "\\1\\2") def value(self): return self.text + self.book_end_chapters + self.book_end def htm2dbk_files(args): doc = htm2dbk_document() for filename in args: try: f = open(filename, "r") doc.filename = filename doc.add(f.read()) f.close() except IOError, e: print >> sys.stderr, "can not open "+filename return doc.value() def html2docbook(text): """ the C comment may contain html markup - simulate with docbook tags """ return htm2dbk_conversion().convert2(text) if __name__ == "__main__": print htm2dbk_files(sys.argv[1:])
repo_name: sigmavirus24/twine | path: twine/cli.py | language: Python | license: apache-2.0 | size: 2,154 | score: 0
# Copyright 2013 Donald Stufft # # Licensed under the Apache License, Version 2.0 (the "Li
cense"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LIC
ENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function from __future__ import unicode_literals import argparse import pkg_resources import setuptools import clint import requests import requests_toolbelt import pkginfo import twine from twine._installed import Installed def _registered_commands(group='twine.registered_commands'): registered_commands = pkg_resources.iter_entry_points(group=group) return dict((c.name, c) for c in registered_commands) def list_dependencies_and_versions(): return [ ('pkginfo', Installed(pkginfo).version), ('requests', requests.__version__), ('setuptools', setuptools.__version__), ('requests-toolbelt', requests_toolbelt.__version__), ('clint', clint.__version__), ] def dep_versions(): return ', '.join( '{0}: {1}'.format(*dependency) for dependency in list_dependencies_and_versions() ) def dispatch(argv): registered_commands = _registered_commands() parser = argparse.ArgumentParser(prog="twine") parser.add_argument( "--version", action="version", version="%(prog)s version {0} ({1})".format(twine.__version__, dep_versions()), ) parser.add_argument( "command", choices=registered_commands.keys(), ) parser.add_argument( "args", help=argparse.SUPPRESS, nargs=argparse.REMAINDER, ) args = parser.parse_args(argv) main = registered_commands[args.command].load() main(args.args)
repo_name: flacjacket/sympy | path: sympy/solvers/polysys.py | language: Python | license: bsd-3-clause | size: 9,192 | score: 0.001523
"""Solvers of systems of polynomial equations. """ from sympy.polys import Poly, groebner, roots from sympy.polys.polytools import parallel_poly_from_expr from sympy.polys.polyerrors import (ComputationFailed, PolificationFailed, CoercionFailed) from sympy.utilities import postfixes from sympy.simplify import rcollect from sympy.core import S class SolveFailed(Exception): """Raised when solver's conditions weren't met. """ def solve_poly_system(seq, *gens, **args): """ Solve a system of polynomial equations. Examples ======== >>> from sympy import solve_poly_system >>> from sympy.abc import x, y >>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y) [(0, 0), (2, -sqrt(2)), (2, sqrt(2))] """ try: polys, opt = parallel_poly_from_expr(seq, *gens, **args) except PolificationFailed, exc: raise ComputationFailed('solve_poly_system', len(seq), exc) if len(polys) == len(opt.gens) == 2: f, g = polys a, b = f.degree_list() c, d = g.degree_list() if a <= 2 and b <= 2 and c <= 2 and d <= 2: try: return solve_biquadratic(f, g, opt) except SolveFailed: pass return solve_generic(polys, opt) def solve_biquadratic(f, g, opt): """Solve a system of two bivariate quadratic polynomial equations. Examples ======== >>> from sympy.polys import Options, Poly >>> from sympy.abc import x, y >>> from sympy.solvers.polysys import solve_biquadratic >>> NewOption = Options((x, y), {'domain': 'ZZ'}) >>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ') >>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ') >>> solve_biquadratic(a, b, NewOption) [(1/3, 3), (41/27, 11/9)] >>> a = Poly(y + x**2 - 3, y, x, domain='ZZ') >>> b = Poly(-y + x - 4, y, x, domain='ZZ') >>> solve_biquadratic(a, b, NewOption) [(-sqrt(29)/2 + 7/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + sqrt(29)/2)] """ G = groebner([f, g]) if len(G) == 1 and G[0].is_ground: return None if len(G) != 2: raise SolveFailed p, q = G x, y = opt.gens p = Poly(p, x, expand=False) q = q.ltrim(-1) p_roots = [ rcollect(expr, y) for expr in roots(p).keys() ] q_roots = roots(q).keys() solutions = [] for q_root in q_roots: for p_root in p_roots: solution = (p_root.subs(y, q_root), q_root) solutions.append(solution) return sorted(solutions) def solve_generic(polys, opt): """ Solve a generic system of polynomial equations. Returns all possible solutions over C[x_1, x_2, ..., x_m] of a set F = { f_1, f_2, ..., f_n } of polynomial equations, using Groebner basis approach. For now only zero-dimensional systems are supported, which means F can have at most a finite number of solutions. The algorithm works by the fact that, supposing G is the basis of F with respect to an elimination order (here lexicographic order is used), G and F generate the same ideal, they have the same set of solutions. By the elimination property, if G is a reduced, zero-dimens
ional Groebner basis, then there exists an univariate polynomial in G (in its last variable). This can be solved by computing its roots. Substituting all computed roots for the last (eliminate
d) variable in other elements of G, new polynomial system is generated. Applying the above procedure recursively, a finite number of solutions can be found. The ability of finding all solutions by this procedure depends on the root finding algorithms. If no solutions were found, it means only that roots() failed, but the system is solvable. To overcome this difficulty use numerical algorithms instead. References ========== .. [Buchberger01] B. Buchberger, Groebner Bases: A Short Introduction for Systems Theorists, In: R. Moreno-Diaz, B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01, February, 2001 .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and Algorithms, Springer, Second Edition, 1997, pp. 112 Examples ======== >>> from sympy.polys import Poly, Options >>> from sympy.solvers.polysys import solve_generic >>> from sympy.abc import x, y >>> NewOption = Options((x, y), {'domain': 'ZZ'}) >>> a = Poly(x - y + 5, x, y, domain='ZZ') >>> b = Poly(x + y - 3, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(-1, 4)] >>> a = Poly(x - 2*y + 5, x, y, domain='ZZ') >>> b = Poly(2*x - y - 3, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(11/3, 13/3)] >>> a = Poly(x**2 + y, x, y, domain='ZZ') >>> b = Poly(x + y*4, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(0, 0), (1/4, -1/16)] """ def _is_univariate(f): """Returns True if 'f' is univariate in its last variable. """ for monom in f.monoms(): if any(m > 0 for m in monom[:-1]): return False return True def _subs_root(f, gen, zero): """Replace generator with a root so that the result is nice. """ p = f.as_expr({gen: zero}) if f.degree(gen) >= 2: p = p.expand(deep=False) return p def _solve_reduced_system(system, gens, entry=False): """Recursively solves reduced polynomial systems. """ if len(system) == len(gens) == 1: zeros = roots(system[0], gens[-1]).keys() return [ (zero,) for zero in zeros ] basis = groebner(system, gens, polys=True) if len(basis) == 1 and basis[0].is_ground: if not entry: return [] else: return None univariate = filter(_is_univariate, basis) if len(univariate) == 1: f = univariate.pop() else: raise NotImplementedError("only zero-dimensional systems supported (finite number of solutions)") gens = f.gens gen = gens[-1] zeros = roots(f.ltrim(gen)).keys() if not zeros: return [] if len(basis) == 1: return [ (zero,) for zero in zeros ] solutions = [] for zero in zeros: new_system = [] new_gens = gens[:-1] for b in basis[:-1]: eq = _subs_root(b, gen, zero) if eq is not S.Zero: new_system.append(eq) for solution in _solve_reduced_system(new_system, new_gens): solutions.append(solution + (zero,)) return solutions try: result = _solve_reduced_system(polys, opt.gens, entry=True) except CoercionFailed: raise NotImplementedError if result is not None: return sorted(result) else: return None def solve_triangulated(polys, *gens, **args): """ Solve a polynomial system using Gianni-Kalkbrenner algorithm. The algorithm proceeds by computing one Groebner basis in the ground domain and then by iteratively computing polynomial factorizations in appropriately constructed algebraic extensions of the ground domain. Examples ======== >>> from sympy.solvers.polysys import solve_triangulated >>> from sympy.abc import x, y, z >>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1] >>> solve_triangulated(F, x, y, z) [(0, 0, 1), (0, 1, 0), (1, 0, 0)] References ========== 1. 
Patrizia Gianni, Teo Mora, Algebraic Solution of System of Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra, Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989 """ G = groebner(polys, gens, polys=True) G = list(reversed(G)) domain = args.get('domain') if domain is not None: for i, g in enumerate(G): G[i] = g.set_domain(domain) f, G = G[0].ltrim(-1), G[1:] dom = f.get_domain() zeros = f.ground_roots() solutions = set([]) for zero in zeros: solutions.add(((zero,), dom)) var_s
dudanogueira/microerp
microerp/almoxarifado/migrations/0004_auto_20141006_1957.py
Python
lgpl-3.0
6,177
0.004371
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import datetime class Migration(migrations.Migration): dependencies = [ ('rh', '0001_initial'), ('estoque', '0005_auto_20141001_0953'), ('comercial', '0007_auto_20141006_1852'), ('almoxarifado', '0003_auto_20140917_0843'), ] operations = [ migrations.CreateModel( name='LinhaListaMaterial', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('quantidade_requisitada', models.DecimalField(max_digits=10, de
cimal_places=2)), ('quantidade_ja_atendida', models.DecimalField(max_digits=10, decimal_places=2)), ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)), ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='LinhaListaMaterialCompra', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)), ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)), ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='LinhaListaMaterialEntregue', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)), ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)), ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='ListaMaterialCompra', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('ativa', models.BooleanField(default=True)), ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)), ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)), ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='ListaMaterialDoContrato', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('ativa', models.BooleanField(default=True)), ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)), ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)), ('contrato', models.OneToOneField(null=True, blank=True, to='comercial.ContratoFechado')), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='ListaMaterialEntregue', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('entregue', models.BooleanField(default=False)), ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)), ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)), ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)), ('entregue_para', models.ForeignKey(related_name=b'entregue_para_set', to='rh.Funcionario')), ('entregue_por', models.ForeignKey(related_name=b'entregue_por_set', to='rh.Funcionario')), ], options={ }, bases=(models.Model,), ), migrations.AddField( model_name='linhalistamaterialentregue', name='lista', field=models.ForeignKey(to='almoxarifado.ListaMaterialEntregue'), preserve_default=True, ), migrations.AddField( model_name='linhalistamaterialentregue', name='produto', field=models.ForeignKey(to='estoque.Produto'), preserve_default=True, ), 
migrations.AddField( model_name='linhalistamaterialcompra', name='lista', field=models.ForeignKey(to='almoxarifado.ListaMaterialCompra'), preserve_default=True, ), migrations.AddField( model_name='linhalistamaterialcompra', name='produto', field=models.ForeignKey(to='estoque.Produto'), preserve_default=True, ), migrations.AddField( model_name='linhalistamaterial', name='lista', field=models.ForeignKey(to='almoxarifado.ListaMaterialDoContrato'), preserve_default=True, ), migrations.AddField( model_name='linhalistamaterial', name='produto', field=models.ForeignKey(to='estoque.Produto'), preserve_default=True, ), ]
MartinGHub/lvr-sat
SAT/bool.py
Python
bsd-3-clause
6,053
0.000991
__author__ = "Martin Jakomin, Mateja Rojko" """ Classes for boolean operators: - Var - Neg - Or - And - Const Functions: - nnf - simplify - cnf - solve - simplify_cnf """ import itertools # functions def nnf(f): """ Returns negation normal form """ return f.nnf() def simplify(f): """ Simplifies the expression """ return nnf(f).simplify() def cnf(f): """ Returns conjunctive normal form """ return nnf(f).cnf().simplify() def solve(f, v): """ Solves the expression using the variable values v """ return f.solve(v) def simplify_cnf(f, v): """ Simplifies the cnf form using the variable values v """ return cnf(f).simplify_cnf(v).simplify() # classes class Var(): """ Variable """ def __init__(self, name): self.name = name def __str__(self): return self.name def solve(self, v): return v[self.name] def simplify_cnf(self, v): if self.name in v: return Const(v[self.name]) else: return self def nnf(self): return self def simplify(self): return self def cnf(self): return self def length(self): return 1 class Neg(): """ Negation operator """ def __init__(self,v): self.value = v def __str__(self): return "~" + str(self.value.__str__()) def solve(self, v): return not(self.value.solve(v)) def simplify_cnf(self, v): if self.value.name in v: return Const(not(v[self.value.name])) else: return self def nnf(self): v = self.value if isinstance(v, Var): return Neg(v) elif isinstance(v, Neg): return v.value.nnf() elif isinstance(v, And): return Or([Neg(x) for x in v.value]).nnf() elif isinstance(v, Or): return And([Neg(x) for x in v.value]).nnf() elif isinstance(v, Const): return v.negate() def simplify(self): return self def cnf(self): return self def length(self): return self.value.length() class And(): """ And operator """ def __init__(self,lst): self.value = lst def __str__(self): s = "(" for i in self.value: s += str(i)+" & " s = s[:len(s)-3] return s + ")" def solve(self, v): for l in self.value: if l.solve(v) is False: return False return True def simplify_cnf(self, v): return And([x.simplify_cnf(v) for x in self.value]) def nnf(self): return And([x.nnf() for x in self.value]) def simplify(self): s = [x.simplify() for x in self.value] # And list flatten ns = [] for x in s: if isinstance(x, And): ns.extend(x.value) else: ns.append(x) s = ns snames = [x.simplify().__str__() for x in s] s2 = [] for i, x in enumerate(s): if Neg(x).nnf().__str__() in snames[i+1:]: return Const(False) elif isinstance(x, Const): if x.value is False: return Const(False) elif snames[i] not in snames[i+1:]: s2.append(x) if len(s2) < 1: return Const(True) elif len(s2) is 1: return s2[0] return And(s2) def cnf(self): return And([x.cnf().simplify() for x in self.value]) def length(self): return sum([x.length() for x in self.value]) class Or(): """ Or operator """ def __init__(self, lst): self.value = lst def __str__(self): s = "(" for i in self.value: s += str(i)+" | " s = s[:len(s)-3] return s + ")" def solve(self, v): for l in self.value: if l.solve(v) is True: return True return False def simplify_cnf(self, v): return Or([x.simplify_cnf(v) for x in self.value]) def nnf(self): return Or([x.nnf() for x in self.value]) def simplify(self): s = [x.simplify() for x in self.value] # Or list flatt
en ns = [] for x in s: if isinstance(x,Or): ns.extend(x.value) else: ns.appe
nd(x) s = ns snames = [x.simplify().__str__() for x in s] s2 = [] for i, x in enumerate(s): if Neg(x).nnf().__str__() in snames[i+1:]: return Const(True) elif isinstance(x, Const): if x.value is True: return Const(True) elif snames[i] not in snames[i+1:]: s2.append(x) if len(s2) < 1: return Const(False) elif len(s2) is 1: return s2[0] return Or(s2) def cnf(self): s = [x.cnf().simplify() for x in self.value] s1 = [x.value if isinstance(x, And) else [x] for x in s] s2 = [] for e in itertools.product(*s1): s3 = [] for x in e: if isinstance(x,Or): s3.extend(x.value) else: s3.append(x) s2.append(Or(s3)) if len(s2) is 1: return s2[0] return And(s2) def length(self): return sum([x.length() for x in self.value]) class Const(): """ Constant """ def __init__(self, c): self.value = c def __str__(self): return str(self.value) def solve(self, v): return self.value def simplify_cnf(self, v): return self def nnf(self): return self def negate(self): if self.value is True: return Const(False) return Const(True) def simplify(self): return self def cnf(self): return self def length(self): return 1
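A short usage sketch for the classes and helper functions listed in the module docstring above; the import path SAT.bool is an assumption based on this record's path, and the formula is invented for illustration.

# Hedged usage sketch for the boolean classes above (assumes the repo root is on sys.path).
from SAT.bool import Var, Neg, And, Or, cnf, solve

x, y = Var("x"), Var("y")
f = And([Or([x, Neg(y)]), y])              # (x | ~y) & y
print(cnf(f))                              # ((x | ~y) & y)
print(solve(f, {"x": True, "y": True}))    # True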
Alafazam/simple_projects
misc/test_tqdm.py
Python
mit
615
0.011382
from ti
me import sleep from tqdm import tqdm import requests url = "http://raw.githubusercontent.com/Alafazam/lecture_notes/master/Cormen%20.pdf" response = requests.get(url, stream=True) with open("10MB", "wb") as handle: total_length = int(response.headers.get('content-length'))/1024 for data in tqdm(response.iter_content(chunk_si
ze=1024),total=total_length, leave=True, unit='KB'): handle.write(data) # with open("10MB", 'wb') as f: # r = requests.get(url, stream=True) # for chunk in tqdm(r.iter_content()): # f.write(chunk) # from tqdm import tqdm # for i in tqdm(range(10000)): # sleep(0.01)
alexanderlz/redash
tests/tasks/test_refresh_schemas.py
Python
bsd-2-clause
934
0
from mock import patch from tests import BaseTestCase from redash.tasks import refresh_schemas class TestRefreshSchemas(BaseTestCase): def test_calls_refresh_of_all_data_sources(s
elf): self.factory.data_source # trigger creation with patch( "redash.tasks.queries.maintenance.refresh_schema.delay" ) as refresh_job: re
fresh_schemas() refresh_job.assert_called() def test_skips_paused_data_sources(self): self.factory.data_source.pause() with patch( "redash.tasks.queries.maintenance.refresh_schema.delay" ) as refresh_job: refresh_schemas() refresh_job.assert_not_called() self.factory.data_source.resume() with patch( "redash.tasks.queries.maintenance.refresh_schema.delay" ) as refresh_job: refresh_schemas() refresh_job.assert_called()
sidthakur/simple-user-management-api
user_auth/app/create_db.py
Python
gpl-3.0
299
0.040134
from pymong
o import MongoClient from passlib.apps import custom_app_context as pwd client = MongoClient( host = "db" ) ride_sharing = client.ride_sharing users = ride_sharing.users users.insert_one( { 'username' : 'sid',
'password_hash' : pwd.encrypt( 'test' ), 'role' : 'driver' } )
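An illustrative follow-up, not part of the original seed script: reading the seeded user back and checking a password against the stored hash with the same passlib context.

# Illustrative check (hypothetical addition): verify a login against the stored hash.
user = users.find_one({'username': 'sid'})
print(pwd.verify('test', user['password_hash']))  # True for the seeded password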
tdean1995/HFPythonSandbox
dist/nester/nester.py
Python
apache-2.0
481
0.008316
"""This module prints lists that may or may not contain nested lists""" def print_lol(the_list): """This function takes a positional argument: called "the_list", which is any Python list which may i
nclude nested lists. Each data item in the provided
lists recursively printed to the screen on its own line.""" for each_item in the_list: if isinstance(each_item,list): print_lol(each_item) else: print(each_item)
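A small illustrative call of print_lol above; the nested list contents are made up for the example.

# Example call (hypothetical data): every item is printed on its own line.
movies = ["The Holy Grail", 1975, ["Graham Chapman", ["Michael Palin", "John Cleese"]]]
print_lol(movies)
# The Holy Grail
# 1975
# Graham Chapman
# Michael Palin
# John Cleese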
alansammarone/mandelbrot
mandelbrot_image.py
Python
gpl-3.0
2,037
0.025037
import os import PIL import math from PIL import Image class MandelbrotImage: def __init__(self, folder): self.folder = folder self.data_folder = os.path.join(folder, 'data') self.image_folder = os.path.join(folder, 'image') if not os.path.isdir(self.image_folder): os.makedirs(self.image_folder) def list_data_files(self): fnames = [fname for fname in os.listdir(self.data_folder)] fnames = [fname for fname in fnames if fname.endswith('.data')] f
names.sort(key=lambda x: int(x.split(".")[0])) return fnames def data_file_to_data(self, filepath): with open(os.path.join(self.data_folder, filepath)) as file: data = file.read() data = data.split(" ") width, height, max_iterations, precision = data[:4] data = data[4:] return int(width), in
t(height), int(max_iterations), int(precision), data def data_to_pixel_data(self, data, coloring_scheme): pixel_data = [] for i in xrange(0, len(data), 3): escape_time = data[i] z_real = data[i+1] z_imag = data[i+2] color = coloring_scheme(escape_time, z_real, z_imag, max_iter) pixel_data.append(color) return pixel_data def pixel_data_to_image(self, filename, pixel_data, width, height): image = Image.new('RGB', (width, height)) image.putdata(pixel_data) image.save(os.path.join(self.image_folder, filename)) def coloring(escape_time, z_real, z_imag, max_iterations): escape_time = int(escape_time) z_real = float(z_real) z_imag = float(z_imag) max_iterations = int(max_iterations) if escape_time == max_iterations + 1: return (255, 255, 255) else: q = escape_time - math.log(math.log((z_real ** 2 + z_imag ** 2))/(2*math.log(2))) return (int(q*255./max_iterations), 0, 0) f = "1" A = MandelbrotImage("1") for idx, file in enumerate(A.list_data_files()): width, height, max_iter, precision, data = A.data_file_to_data(file) pixel_data = A.data_to_pixel_data(data, coloring) A.pixel_data_to_image("%s.png" % idx, pixel_data, width, height) print "Done with file %s" % file
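A worked numeric example of the smooth colouring formula used in coloring() above; the escape values are invented for illustration.

# Worked example of the smooth colouring (values are made up).
import math
escape_time, z_real, z_imag, max_iterations = 20, 3.0, 4.0, 100
q = escape_time - math.log(math.log(z_real ** 2 + z_imag ** 2) / (2 * math.log(2)))
print((int(q * 255. / max_iterations), 0, 0))  # (48, 0, 0), a dark red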
anhstudios/swganh
data/scripts/templates/object/draft_schematic/bio_engineer/bio_component/shared_bio_component_food_duration_2.py
Python
mit
470
0.046809
#### NOTICE:
THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Intangible() result.template = "object/draft_schematic/bio_engineer/bio_component/shared_bio_component_food_duration_2.iff" result.attribute_template_id = -1 result.stfName("","") ##
## BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
davesnowdon/nao-wanderer
wanderer/src/main/python/wanderer/wanderer.py
Python
gpl-2.0
14,410
0.005968
''' Created on Jan 19, 2013 @author: dsnowdon ''' import os import tempfile import datetime import json import logging from naoutil.jsonobj import to_json_string, from_json_string from naoutil.general import find_class import robotstate from event import * from action import * from naoutil.naoenv import make_environment ''' Here we define the memory locations used to store state ''' MEM_SECURITY_DISTANCE = "WandererSecurityDistance" MEM_HEADING = "WandererWalkHeading" MEM_WALK_PATH = "WandererWalkPath" MEM_DETECTED_FACE_DIRECTION = "WandererFaceDirection" MEM_PLANNED_ACTIONS = "WandererActionsPlanned" MEM_CURRENT_ACTIONS = "WandererActionsInProgress" MEM_COMPLETED_ACTIONS = "WandererActionsCompleted" MEM_CURRENT_EVENT = "WandererEvent" MEM_MAP = "WandererMap" MEM_LOCATION = "WandererLocation" EVENT_LOOK_FOR_PEOPLE = "WandererEventLookForPeople" DEFAULT_CONFIG_FILE = "wanderer" PROPERTY_PLANNER_CLASS = "plannerClass" DEFAULT_PLANNER_CLASS = "wanderer.randomwalk.RandomWalk" PROPERTY_EXECUTOR_CLASS = "executorClass" DEFAULT_EXECUTOR_CLASS = "wanderer.wanderer.PlanExecutor" PROPERTY_MAPPER_CLASS = "mapperClass" DEFAULT_MAPPER_CLASS = "wanderer.wanderer.NullMapper" PROPERTY_UPDATER_CLASSES = "updaterClasses" PROPERTY_HTTP_PORT = "httpPort" DEFAULT_HTTP_PORT = 8080 PROPERTY_DATA_COLLECTOR_HOST = "dataCollectorHost" PROPERTY_DATA_COLLECTOR_PORT = "dataCollectorPort" PROPERTY_LOOK_FOR_PEOPLE = "lookForPeople" STATIC_WEB_DIR = "web" CENTRE_BIAS = False HEAD_HORIZONTAL_OFFSET = 0 WANDERER_NAME = "wanderer" # START GLOBALS # We put instances of planners, executors and mappers here so we don't need to continually create # new instances planner_instance = None executor_instance = None mapper_instance = None updater_instances = None # END GLOBALS wanderer_logger = logging.getLogger("wanderer.wanderer") def init_state(env, startPos): # declare events env.memory.declareEvent(EVENT_LOOK_FOR_PEOPLE); # getData & removeData throw errors if the value is not set, # so ensure all the memory locations we want to use are initialised env.memory.insertData(MEM_CURRENT_EVENT, None) # set "security distance" env.memory.insertData(MEM_SECURITY_DISTANCE, "0.25") # should we look for people as we go? lookForPeople = env.get_property(DEFAULT_CONFIG_FILE, PROPERTY_LOOK_FOR_PEOPLE) if lookForPeople: env.memory.raiseEvent(EVENT_LOOK_FOR_PEOPLE, True) env.log("Looking for people") else: env.memory.raiseEvent(EVENT_LOOK_FOR_PEOPLE, False) env.log("Not looking for people") # set initial position (in list of positions) env.memory.insertData(MEM_WALK_PATH, [startPos]) # current actions and completed actions env.memory.insertData(MEM_PLANNED_ACTIONS, "") env.memory.insertData(MEM_CURRENT_ACTIONS, "") env.memory.insertData(MEM_COMPLETED_ACTIONS, "") def shutdown(env): planner = get_planner_instance(env) planner.shutdown() executor = get_executor_instance(env, None) executor.shutdown() mapper = get_mapper_instance(env) mapper.shutdown() updater_instances = get_updaters(env) for updater in updater_instances: updater.shutdown() ''' Base class for wanderer planning. 
Handles generating plans and reacting to events ''' class Planner(object): def __init__(self, env_): super(Planner, self).__init__() self.env = env_ def handleEvent(self, event, state): plan = self.dispatch(event, state) save_plan(self.env, plan) log_plan(self.env, "New plan", plan) return plan # return true if this event should cause the current plan to be executed and # a new plan created to react to it def does_event_interrupt_plan(self, event, state): return True def dispatch(self, event, state): methodName = 'handle'+ event.name() try: method = getattr(self, methodName) return method(event, state) except AttributeError: self.env.log("Unimplemented event handler for: {}".format(event.name())) def shutdown(self): pass ''' Base class for executing plans. Since we may need to trigger choreographe boxes we delegate actually performing a single action to an actionExecutor which in most cases will be the choreographe box that called us. The actionExecutor must implement do_acti
on(action) and all_done() ''' class PlanExecutor(object): def __init__(self, env, actionExecutor): super(PlanExecutor, self).__init__() self.env = env self.actionExecutor = actionExecutor def perform_next_action(self): self.env.log("perform next action") # save completed action to history if there is one completedAction = get_current_action(self.env)
self.env.log("Completed action = {}".format(repr(completedAction))) if not completedAction is None: if not isinstance(completedAction, NullAction): push_completed_action(self.env, completedAction) # if we have moved, then save current location if isinstance(completedAction, Move): self._have_moved_wrapper() self.env.log("set current action to NullAction") # ensure that current action is cleared until we have another one set_current_action(self.env, NullAction()) self.env.log("pop from plan") # pop first action from plan action = pop_planned_action(self.env) if action is None: self.env.log("No next action") self.actionExecutor.all_done() else: self.env.log("Next action = {}".format(repr(action))) set_current_action(self.env, action) self.actionExecutor.do_action(action) self.env.log("perform_next_action done") # get current and previous positions and call have_moved # it's not intended that this method be overridden def _have_moved_wrapper(self): self.env.log("Have moved") pos = get_position(self.env) lastPos = get_last_position(self.env) self.have_moved(lastPos, pos) save_waypoint(self.env, pos) # hook for base classes to implement additional functionality # after robot has moved def have_moved(self, previousPos, currentPos): pass def save_position(self): pos = get_position(self.env) save_waypoint(self.env, pos) def shutdown(self): pass ''' Abstract mapping class ''' class AbstractMapper(object): def __init__(self, env): super(AbstractMapper, self).__init__() self.env = env # update map based on new sensor data def update(self, position, sensors): pass # return the current map def get_map(self): return None def shutdown(self): pass ''' Null mapper - does nothing, just a place holder for when no mapping is actually required ''' class NullMapper(AbstractMapper): def __init__(self, env): super(NullMapper, self).__init__(env) ''' Mapper that does no actual mapping, but logs all data to file for future analysis ''' class FileLoggingMapper(AbstractMapper): def __init__(self, env, save_data=True): super(FileLoggingMapper, self).__init__(env) self.save_data = save_data if self.save_data: self.open_data_file() # save the data to file def update(self, position, sensors): if self.save_data: self.save_update_data(position, sensors) def open_data_file(self): self.logFilename = tempfile.mktemp() self.env.log("Saving sensor data to {}".format(self.logFilename)) self.first_write = True try: self.logFile = open(self.logFilename, 'r+') except IOError: self.env.log("Failed to open file: {}".format(self.logFilename)) self.logFile = None def save_update_data(self, position, sensors): if self.logFile: data = { 'timestamp' : self.timestamp(), 'position' : position,
CroceRossaItaliana/jorvik
attivita/migrations/0013_partecipazione_centrale_operativa.py
Python
gpl-3.0
448
0
# -*-
coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('attivita', '0012_attivita_centrale_operativa'), ] operations = [ migrations.AddField( model_name='partecipazione', name='centrale_operativa', field=models.BooleanField(default=False, db_index=True),
), ]
rero/reroils-app
rero_ils/modules/organisations/api.py
Python
gpl-2.0
7,019
0
# -*- coding: utf-8 -*- # # RERO ILS # Copyright (C) 2019 RERO # Copyright (C) 2020 UCLouvain # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """API for manipulating organisation.""" from functools import partial from elasticsearch.exceptions import NotFoundError from .models import OrganisationIdentifier, OrganisationMetadata from ..api import IlsRecord, IlsRecordsIndexer, IlsRecordsSearch from ..fetchers import id_fetcher from ..item_types.api import ItemTypesSearch from ..libraries.api import LibrariesSearch, Library from ..minters import id_minter from ..providers import Provider from ..utils import sorted_pids from ..vendors.api import Vendor, VendorsSearch # provider OrganisationProvider = type( 'OrganisationProvider', (Provider,), dict(identifier=OrganisationIdentifier, pid_type='org') ) # minter organisation_id_minter = partial(id_minter, provider=OrganisationProvider) # fetcher organisation_id_fetcher = partial(id_fetcher, provider=OrganisationProvider) class OrganisationsSearch(IlsRecordsSearch): """Organisation search.""" class Meta: """Meta class.""" index = 'organisations' doc_types = None fields = ('*', ) facets = {} default_filter = None def get_record_by_viewcode(self, viewcode, fields=None): """Search by viewcode.""" query = self.filter('term', code=viewcode).extra(size=1) if fields: query = query.source(includes=fields) response = query.execute() if response.hits.total.value != 1: raise NotFoundError( f'Organisation viewcode {viewcode}: Result not found.') return response.hits.hits[0]._source class Organisation(IlsRecord): """Organisation class.""" minter = organisation_id_minter fetcher = organisation_id_fetcher provider = OrganisationProvider model_cls = OrganisationMetadata @classmethod def get_all(cls): """Get all organisations.""" return sorted([ Organisation.get_record_by_id(_id) for _id in Organisation.get_all_ids() ], key=lambda org: org.get('name')) @classmethod def all_code(cls): """Get all code.""" return [org.get('code') for org in cls.get_all()] @classmethod def get_record_by_viewcode(cls, viewcode): """Get record by view code.""" result = OrganisationsSearch().filter( 'term', code=viewcode ).execute() if result['hits']['total']['value'] != 1: raise Exception( 'Organisation (get_record_by_viewcode): Result not found.') return result['hits']['hits'][0]['_source']
@classmethod def get_record_by_online_harvested_source(cls, source): """Get record by online harvested source. :param source: the record source :return: Organisation record or None. """
results = OrganisationsSearch().filter( 'term', online_harvested_source=source).scan() try: return Organisation.get_record_by_pid(next(results).pid) except StopIteration: return None @property def organisation_pid(self): """Get organisation pid .""" return self.pid def online_circulation_category(self): """Get the default circulation category for online resources.""" results = ItemTypesSearch().filter( 'term', organisation__pid=self.pid).filter( 'term', type='online').source(['pid']).scan() try: return next(results).pid except StopIteration: return None def get_online_locations(self): """Get list of online locations.""" return [library.online_location for library in self.get_libraries() if library.online_location] def get_libraries_pids(self): """Get all libraries pids related to the organisation.""" results = LibrariesSearch().source(['pid'])\ .filter('term', organisation__pid=self.pid)\ .scan() for result in results: yield result.pid def get_libraries(self): """Get all libraries related to the organisation.""" pids = self.get_libraries_pids() for pid in pids: yield Library.get_record_by_pid(pid) def get_vendor_pids(self): """Get all vendor pids related to the organisation.""" results = VendorsSearch().source(['pid'])\ .filter('term', organisation__pid=self.pid)\ .scan() for result in results: yield result.pid def get_vendors(self): """Get all vendors related to the organisation.""" pids = self.get_vendor_pids() for pid in pids: yield Vendor.get_record_by_pid(pid) def get_links_to_me(self, get_pids=False): """Record links. :param get_pids: if True list of linked pids if False count of linked records """ from ..acq_receipts.api import AcqReceiptsSearch library_query = LibrariesSearch()\ .filter('term', organisation__pid=self.pid) receipt_query = AcqReceiptsSearch() \ .filter('term', organisation__pid=self.pid) links = {} if get_pids: libraries = sorted_pids(library_query) receipts = sorted_pids(receipt_query) else: libraries = library_query.count() receipts = receipt_query.count() if libraries: links['libraries'] = libraries if receipts: links['acq_receipts'] = receipts return links def reasons_not_to_delete(self): """Get reasons not to delete record.""" cannot_delete = {} links = self.get_links_to_me() if links: cannot_delete['links'] = links return cannot_delete def is_test_organisation(self): """Check if this is a test organisation.""" if self.get('code') == 'cypress': return True return False class OrganisationsIndexer(IlsRecordsIndexer): """Holdings indexing class.""" record_cls = Organisation def bulk_index(self, record_id_iterator): """Bulk index records. :param record_id_iterator: Iterator yielding record UUIDs. """ super().bulk_index(record_id_iterator, doc_type='org')
HIPS/neural-fingerprint
neuralfingerprint/mol_graph.py
Python
mit
3,492
0.002864
import numpy as np from rdkit.Chem import MolFromSmiles from features import atom_features, bond_features degrees = [0, 1, 2, 3, 4, 5] class MolGraph(object): def __init__(self): self.nodes = {} # dict of lists of nodes, keyed by node type def new_node(self, ntype, features=None, rdkit_ix=None): new_node = Node(ntype, features, rdkit_ix) self.nodes.setdefault(ntype, []).append(new_node) return new_node def add_subgraph(self, subgraph): old_nodes = self.nodes new_nodes = subgraph.nodes for ntype in set(old_nodes.keys()) | set(new_nodes.keys()): old_nodes.setdefault(ntype, []).extend(new_nodes.get(ntype, [])) def sort_nodes_by_degree(self, ntype): nodes_by_degree = {i : [] for i in degrees} for node in self.nodes[ntype]: nodes_by_degree[len(node.get_neighbors(ntype))].append(node) new_nodes = [] for degree in degrees: cur_nodes = nodes_by_degree[degree] self.nodes[(ntype, degree)] = cur_nodes new_nodes.extend(cur_nodes) self.nodes[ntype] = new_nodes def feature_array(self, ntype): assert ntype in self.nodes return np.array([node.features for node in self.nodes[ntype]]) def rdkit_ix_array(self): return np.array([node.rdkit_ix for node in self.nodes['atom']]) def neighbor_list(self, self_ntype, neighbor_ntype): assert self_ntype in self.nodes and neighbor_ntype in self.nodes neighbor_idxs = {n : i for i, n in enumerate(self.nodes[neighbor_ntype])} return [[neighbor_idxs[neighbor] for neighbor in self_node.get_neighbors(neighbor_ntype)] for self_node in self.nodes[self_ntype]] class Node(object): __slots__ = ['ntype', 'features', '_neighbors', 'rdkit_ix'] def __init__(self, ntype, features, rdkit_ix): self.ntype = ntype self.features = features self._neighbors = [] self.rdkit_ix = rdkit_ix def add_neighbors(self, neighbor_list): for neighbor in neighbor_list: self._neighbors.append(neighbor) neighbor._neighbors.append(self) def get_neighbors(self, ntype): return [n for n in self._neighbors if n.ntype == ntype] def graph_from_smiles_tuple(smiles_tuple): graph_list = [graph_from_smi
les(s) for s in smiles_tuple] big_graph = MolGraph() for subgraph in graph_list: big_graph.add_
subgraph(subgraph) # This sorting allows an efficient (but brittle!) indexing later on. big_graph.sort_nodes_by_degree('atom') return big_graph def graph_from_smiles(smiles): graph = MolGraph() mol = MolFromSmiles(smiles) if not mol: raise ValueError("Could not parse SMILES string:", smiles) atoms_by_rd_idx = {} for atom in mol.GetAtoms(): new_atom_node = graph.new_node('atom', features=atom_features(atom), rdkit_ix=atom.GetIdx()) atoms_by_rd_idx[atom.GetIdx()] = new_atom_node for bond in mol.GetBonds(): atom1_node = atoms_by_rd_idx[bond.GetBeginAtom().GetIdx()] atom2_node = atoms_by_rd_idx[bond.GetEndAtom().GetIdx()] new_bond_node = graph.new_node('bond', features=bond_features(bond)) new_bond_node.add_neighbors((atom1_node, atom2_node)) atom1_node.add_neighbors((atom2_node,)) mol_node = graph.new_node('molecule') mol_node.add_neighbors(graph.nodes['atom']) return graph
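An illustrative use of graph_from_smiles above (requires RDKit and the repository's features module); the SMILES string is an arbitrary example.

# Hypothetical usage: build a molecular graph for ethanol and inspect it.
graph = graph_from_smiles("CCO")             # 3 heavy atoms, 2 bonds
print(len(graph.nodes['atom']))              # 3
print(graph.neighbor_list('bond', 'atom'))   # [[0, 1], [1, 2]]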
langcog/alignment
parsers/alignment.py
Python
gpl-2.0
7,365
0.035709
import csv import operator import itertools import math import logger1 import re #main piece of code for calculating & wwriting alignments from processed data def calculateAlignments(utterances, markers, smoothing, outputFile, shouldWriteHeader, corpusType='CHILDES'): markers = checkMarkers(markers) groupedUtterances = group(utterances) metaData = metaDataExtractor(groupedUtterances,markers,corpusType) results = runFormula(metaData, markers, smoothing,corpusType) writeFile(results, outputFile, shouldWriteHeader) return results # Converts list of markers in a message to categories def determineCategories(msgMarkers,catdict,useREs=False): msgCats = [] #iterate over catdict items {category: [words/REs]} for cd in catdict.items(): if useREs: if any(any(wordre.match(marker) for marker in msgMarkers) for wordre in cd[1]): #if REs, see if any tokens match each RE msgCats.append(cd[0]) else: if any(word in msgMarkers for word in cd[1]): #if just words, see if any word in category also in msg msgCats.append(cd[0]) return msgCats # Groups tweets by conversation ids def group(utterances): utterances.sort(key=operator.itemgetter('convId')) list1 = [] for key, items in itertools.groupby(utterances, operator.itemgetter('convId')): list1.append(list(items)) return list1 #code to convert marker list structure to {category: [words]} structure def makeCatDict(markers,useREs=False): mdict = {} for m in markers: marker = re.compile(''.join([m["marker"], '$'])) if useREs else m["marker"] if m["category"] in mdict: mdict[m["category"]].append(marker) else: mdict[m["category"]] = [marker] #mdict[m["category"]] = mdict.get(m["category"],[]).append(m["marker"]) #Need to swap marker and category labels #mdict[m["marker"]] = mdict.get(m["marker"],[]).append(m["category"]) return(mdict) #Given a conversation & the list of markers, extract counts of speaker & replier using each marker def findMarkersInConvo(markers,convo): ba = {} # Number of times Person A and person B says the marker["marker"] bna = {} nbna = {} nba = {} for utterance in convo: for j, marker in enumerate(markers): word = marker["marker"] msgMarker = word in utterance["msgMarkers"] replyMarker = word in utterance["replyMarkers"] if msgMarker and replyMarker: ba[word] = ba.get(word,0) + 1 elif replyMarker and not msgMarker: bna[word] = bna.get(word,0) + 1 elif not replyMarker and msgMarker: nba[word] = nba.get(word,0) + 1 else: nbna[word] = nbna.get(word,0) + 1 return({'ba': ba,'bna': bna,'nba': nba,'nbna': nbna}) #Copying portions of one dictionary to another (faster than copy(), if you can believe it!) 
def addFeats(toAppend,utterance,renameIds=True,corpusType=''): if renameIds: toAppend["speakerId"] = utterance["msgUserId"] toAppend["replierId"] = utterance["replyUserId"] else: toAppend["speakerId"] = utterance["speakerId"] toAppend["replierId"] = utterance["replierId"] if(corpusType=='Twitter'): toAppend["reciprocity"] = utterance["reciprocity"] toAppend["verifiedSpeaker"] = bool(utterance["verifiedSpeaker"]) toAppend["verifiedReplier"] = bool(utterance["verifiedReplier"]) toAppend["speakerFollowers"] = utterance["speakerFollowers"] toAppend["replierFollowers"] = utterance["replierFollowers"] elif(corpusType=='CHILDES'): toAppend["corpus"] = utterance["corpus"] toAppend["docId"] = utterance["docId"] return(toAppend) # calculates the marker usage counts from conversations def metaDataExtractor(groupedUtterances, markers,corpusType=''): results = [] for i, convo in enumerate(groupedUtterances): if(i % 2500 is 10): logger1.log("On " + str(i) + " of " + str(len(groupedUtterances))) toAppend = findMarkersInConvo(markers,convo) toAppend = addFeats(toAppend,convo[0],True,corpusType) results.append(toAppend) return results # extracts a list of markers from the marker dictionary def allMarkers(markers): categories = [] for marker in markers: categories.append(marker["marker"]) return list(set(categories)) # creates a dictionary corresponding to a single row of the final output (speaker-replier-marker triplet) def createAlignmentDict(category,result,smoothing,corpusType=''): toAppend = {} ba = int(result["ba"].get(category, 0)) bna = int(result["bna"].get(category, 0)) nbna = int(result["nbna"].get(category, 0)) nba = int(result["nba"].get(category, 0)) #Calculating alignment only makes sense if we've seen messages with and without the marker if (((ba+nba)==0 or (bna+nbna)==0)): return(None) toAppend = addFeats(toAppend,result,False,corpusType) toAppend["category"] = category #Calculating Echoes of Power alignment powerNum = ba powerDenom = ba+nba baseNum = ba+bna baseDenom = ba+nba+bna+nbna if(powerDenom != 0 and baseDenom != 0): dnmalignment = powerNum/powerDenom - baseNum/baseDenom toAppend["dnmalignment"] = dnmalignment else: toAppend["dnmalignment"] = False powerNum = ba powerDenom = ba+nba baseDenom = bna+nbna baseNum = bna powerProb = math.log((powerNum+smoothing)/float(powerDenom+2*smoothing)) baseProb = math.log((baseNum+smoothing)/float(baseDenom+2*smoot
hing)) alignment = powerProb - baseProb toAppend["alignment"] = alignment toAppend["ba"] = ba toAppend["bna"] = bna toAppend["nba"
] = nba toAppend["nbna"] = nbna return(toAppend) # Gets us from the meta-data to the final output file def runFormula(results, markers, smoothing,corpusType): toReturn = [] categories = allMarkers(markers) for i, result in enumerate(results): if(i % 1000 is 10): logger1.log("On result " + str(i) + " of " + str(len(results))) for j, category in enumerate(categories): toAppend = createAlignmentDict(category,result,smoothing,corpusType) if toAppend is not None: toReturn.append(toAppend) toReturn = sorted(toReturn, key=lambda k: (k["speakerId"],k["replierId"],k["category"])) return toReturn # Writes stuff to the output file def writeFile(results, outputFile, shouldWriteHeader): if len(results) == 0: logger1.log("No results to write =(") return toWrite = [] header = sorted(list(results[0].keys())) for row in results: toAppend = [] for key in header: toAppend.append(row[key]) toWrite.append(toAppend) if shouldWriteHeader: with open(outputFile, "w", newline='') as f: writer = csv.writer(f) writer.writerows([header]) f.close() with open(outputFile, "a", newline='') as f: writer = csv.writer(f) writer.writerows(toWrite) f.close() # Reads a list of markers from the markersFile def readMarkers(markersFile,dialect=None): if dialect is None: reader = csv.reader(open(markersFile)) else: reader = csv.reader(open(markersFile),dialect=dialect) markers = [] #print('marker\tcategory') for i, row in enumerate(reader): toAppend = {} toAppend["marker"] = row[0] if(len(row) > 1): toAppend["category"] = row[1] else: toAppend["category"] = row[0] markers.append(toAppend) #print(toAppend["marker"]+'\t'+toAppend["category"]) return markers # checks & adapts the structure of the marker list to the appropriate one def checkMarkers(markers): toReturn = [] for marker in markers: if isinstance(marker, str): toReturn.append({"marker": marker, "category": marker}) else: toReturn.append(marker) return toReturn
Orav/kbengine
kbe/src/lib/python/Lib/collections/__init__.py
Python
lgpl-3.0
43,096
0.003202
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] # For backwards compatibility, continue to make the collections ABCs # available through the collections module. from _collections_abc import * import _collections_abc __all__ += _collections_abc.__all__ from _collections import deque, defaultdict from operator import itemgetter as _itemgetter, eq as _eq from keyword import iskeyword as _iskeyword import sys as _sys import heapq as _heapq from _weakref import proxy as _proxy from itertools import repeat as _repeat, chain as _chain, starmap as _starmap from reprlib import recursive_repr as _recursive_repr ################################################################################ ### OrderedDict ################################################################################ class _Link(object): __slots__ = 'prev', 'next', 'key', '__weakref__' class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as regular dictionaries. # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # The sentinel is in self.__hardroot with a weakref proxy in self.__root. # The prev links are weakref proxies (to prevent circular references). # Individual links are kept alive by the hard reference in self.__map. # Those hard references disappear when a key is deleted from an OrderedDict. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__hardroot = _Link() self.__root = root = _proxy(self.__hardroot) root.prev = root.next = root self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. if key not in self: self.__map[key] = link = Link() root = self.__root last = root.prev link.prev, link.next, link.key = last, root, key last.next = link root.prev = proxy(link) dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which gets # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link = self.__map.pop(key) link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. root = self.__root curr = root.next while curr is not root: yield curr.key curr = curr.next def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. root = self.__root curr = root.prev while curr is not root: yield curr.key curr = curr.prev def clear(self): 'od.clear() -> None. Remove all items from od.' 
root = self.__root root.prev = root.next = root self.__map.clear() dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root.prev link_prev = link.prev link_prev.next = root root.prev = link_prev else: link = root.next link_next = link.next root.next = link_next link_next.prev = root key = link.key del self.__map[key] value = dict.pop(self, key) return key, value def move_to_end(self, key, last=True): '''Move an existing element to the end (or beginning if last==False). Raises KeyError if the element does not exist. When last=True, acts like a fast version of self[key]=self.pop(key). ''' link = self.__map[key] link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev root = self.__root if last: last = ro
ot.prev link.prev = last link.next = root last.next = root.prev = link else: first = root.next link.prev = root link.next = first root.next = first.prev = link def __sizeof__(self): sizeof = _sys.getsizeof n = len(self) + 1 # number of links including root size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict size += sizeof(self.__hardroot) * n # link objects size += sizeof(self.__root) * n # proxy objects return size update = __update = MutableMapping.update keys = MutableMapping.keys values = MutableMapping.values items = MutableMapping.items __ne__ = MutableMapping.__ne__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default @_recursive_repr() def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self.items())) def __reduce__(self): 'Return state information for pickling' inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) return self.__class__, (), inst_dict or None, None, iter(self.items()) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None. ''' self = cls() for key in iterable: self[key] = value return self def __eq__(self, other): '''od.__eq
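A brief demonstration of the order-aware methods whose docstrings appear above; the key/value pairs are arbitrary.

# Small demonstration of the linked-list-backed ordering.
od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
od.move_to_end('a')            # relink 'a' to the end of the doubly linked list
print(list(od))                # ['b', 'c', 'a']
print(od.popitem(last=False))  # ('b', 2) -- FIFO order when last is False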
wiggi/huntercore
qa/rpc-tests/txn_clone.py
Python
mit
7,555
0.006353
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test proper accounting with an equivalent malleability clone # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class TxnMallTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 4 self.setup_clean_chain = False def add_options(self, parser): parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: return super(TxnMallTest, self).setup_network(True) def run_test(self): # All nodes should start with 1,250 BTC: starting_balance = 1250 for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress! # Assign coins to foo and bar accounts: self.nodes[0].settxfee(.005) node0_address_foo = self.nodes[0].getnewaddress("foo") fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress("bar") fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(""), starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress("from0") # Send tx1, and another transaction tx2 that won't be cloned txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) # Construct a clone of tx1, to be malleated rawtx1 = self.nodes[0].getrawtransaction(txid1,1) clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}] clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"], rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]} clone_locktime = rawtx1["locktime"] clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, None, clone_locktime) # createrawtransaction randomizes the order of its outputs, so swap them if necessary. # output 0 is at version+#inputs+input+sigstub+sequence+#outpu
ts # 40 BTC serialized is 00286bee00000000 pos0 = 2*(4+1+36+1+4+1) hex40 = "00286bee00000000" output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0) if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or rawt
x1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40): output0 = clone_raw[pos0 : pos0 + output_len] output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len] clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:] # Use a different signature hash type to sign. This creates an equivalent but malleated clone. # Don't send the clone anywhere yet tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY") assert_equal(tx1_clone["complete"], True) # Have node0 mine a block, if requested: if (self.options.mine_block): self.nodes[0].generate(1) sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50BTC for another # matured block, minus tx1 and tx2 amounts, and minus transaction fees: expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] if self.options.mine_block: expected += 50 expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) # foo and bar accounts should be debited: assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"]) assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) # Node1's "from0" balance should be both transaction amounts: assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"])) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Send clone and its parent to miner self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"]) # ... mine a block... self.nodes[2].generate(1) # Reconnect the split network, and sync chain: connect_nodes(self.nodes[1], 2) self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) self.nodes[2].sendrawtransaction(tx2["hex"]) self.nodes[2].generate(1) # Mine another block to make sure we sync sync_blocks(self.nodes) # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx1_clone = self.nodes[0].gettransaction(txid1_clone) tx2 = self.nodes[0].gettransaction(txid2) # Verify expected confirmations assert_equal(tx1["confirmations"], -2) assert_equal(tx1_clone["confirmations"], 2) assert_equal(tx2["confirmations"], 1) # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured, # less possible orphaned matured subsidy expected += 100 if (self.options.mine_block): expected -= 50 assert_equal(self.nodes[0].getbalance(), expected) assert_equal(self.nodes[0].getbalance("*", 0), expected) # Check node0's individual account balances. # "foo" should have been debited by the equivalent clone of tx1 assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"]) # "bar" should have been debited by (possibly unconfirmed) tx2 assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"]) # "" should have starting balance, less funding txes, plus subsidies assert_equal(self.nodes[0].getbalance("", 0), starting_balance - 1219 + fund_foo_tx["fee"] - 29 + fund_bar_tx["fee"] + 100) # Node1's "from0" account balance assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"])) if __name__ == '__main__': TxnMallTest().main()
Stargrazer82301/CAAPR
CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/config/initialize_fit.py
Python
mit
1,745
0.005161
#!/usr/bin/env python # -*- coding: utf8 -*- # ***************************************************************** # ** PTS -- Python Toolkit for working with SKIRT ** # ** © Astronomical Observatory, Ghent University ** # ***************************************************************** # Import the relevant PTS classes and modules from pts.core.basics.configuration import ConfigurationDefinition # ----------------------------------------------------------------- # Create the configuration definition = ConfigurationDefinition() # Add optional arguments definition.add_section("wavelengths") definition.sections["wavelengths"].add_optional("unit", str, "the unit of the wavelengths", "micron") definition.sections["wavelengths"].add_optional("min", float, "the minimum wavelength", 0.09) definition.sections["wavelengths"].add_optional("max", float, "the maximum wavelength", 2000) definition.sections["wavelengths"].add_optional("npoints", int, "the number of wavelength points", 100) definition.sections["wavelengths"].add_optional("min_zoom", float, "the minimum wavelength of the zoomed-in grid", 1) definition.sections["wavelengths"].add_optional("max_zoom", float, "the maximum wavelength of the zoomed-in grid", 30) definition.sections["wavelengths"].add_optional("npoints_zoom", int, "the number of wavelength points in the zoomed-in grid", 100) definition.add_optional("packages", float, "the number of photon packages per wavelen
gth", 2e5) definition.add_flag("selfabsorption", "enab
le dust self-absorption") definition.add_optional("dust_grid", str, "the type of dust grid to use (bintree, octtree or cartesian)", "bintree") # -----------------------------------------------------------------
Fewbytes/rubber-docker
levels/04_overlay/rd.py
Python
mit
5,063
0
#!/usr/bin/env python2.7 """Docker From Scratch Workshop - Level 4: Add overlay FS. Goal: Instead of re-extracting the image, use it as a read-only layer (lowerdir), and create a copy-on-write layer for changes (upperdir). HINT: Don't forget that overlay fs also requires a workdir. Read more on overlay FS here: https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt """ from __future__ import print_function import linux import tarfile import uuid import click import os import stat import traceback def _get_image_path(image_name, image_dir, image_suffix='tar'): return os.path.join(image_dir, os.extsep.join([image_name, image_suffix])) def _get_container_path(container_id, container_dir, *subdir_names): return os.path.join(container_dir, container_id, *subdir_names) def create_container_root(image_name, image_dir, container_id, container_dir): image_path = _get_image_path(image_name, image_dir) assert os.path.exists(image_path), "unable to locate image %s" % image_name # TODO: Instead of creating the container_root and extracting to it, # create an images_root. # keep only one rootfs per image and re-use it container_root = _get_container_path(container_id, container_dir, 'rootfs') if not os.path.exists(container_root): os.makedirs(container_root) with tarfile.open(image_path) as t: # Fun fact: tar files may contain *nix devices! *facepalm* members = [m for m in t.getmembers() if m.type not in (tarfile.CHRTYPE, tarfile.BLKTYPE)] t.extractall(container_root, members=members) # TODO: create directories for copy-on-write (uppperdir), overlay workdir, # and a mount point # TODO: mount the overlay (HINT: use the MS_NODEV flag to mount) return container_root # return the mountpoint for the mounted overlayfs @click.group() def cli(): pass def makedev(dev_path): for i, dev in enumerate(['stdin', 'stdout', 'stderr']): os.symlink('/proc/self/fd/%d' % i, os.path.join(dev_path, dev)) os.symlink('/proc/self/fd', os.path.join(dev_path, 'fd')) # Add extra devices DEVICES = {'null': (stat.S_IFCHR, 1, 3), 'zero': (stat.S_IFCHR, 1, 5), 'random': (stat.S_IFCHR, 1, 8), 'urandom': (stat.S_IFCHR, 1, 9), 'console': (stat.S_IFCHR, 136, 1), 'tty': (stat.S_IFCHR, 5, 0), 'full': (stat.S_IFCHR, 1, 7)} for device, (dev_type, major, minor) in DEVICES.iteritems(): os.mknod(os.path.join(dev_path, device), 0o666 | dev_type, os.makedev(major, minor)) def _create_mounts(new_root): # Create mounts (/proc, /sys, /dev) under new_root linux.mount('proc', os.path.join(new_root, 'proc'), 'proc', 0, '') linux.mount('sysfs', os.path.join(new_root, 'sys'), 'sysfs', 0, '') linux.mount('tmpfs', os.path.join(new_root, 'dev'), 'tmpfs', linux.MS_NOSUID | linux.MS_STRICTATIME, 'mode=755') # Add some basic devices devpts_path = os.path.join(new_root, 'dev', 'pts') if not os.path.exists(devpts_path): os.makedirs(devpts_path) linux.mount('devpts', devpts_path, 'devpts', 0, '') makedev(os.path.join(new_root,
'dev')) def contain(command, image_name, image_dir, container_id, container_dir): linux.unshare(linux.CLONE_NEWNS) # create a new mount namespace linux.mount(None, '/', None, linux.MS_PRIVATE | linux.MS_REC, N
one) new_root = create_container_root( image_name, image_dir, container_id, container_dir) print('Created a new root fs for our container: {}'.format(new_root)) _create_mounts(new_root) old_root = os.path.join(new_root, 'old_root') os.makedirs(old_root) linux.pivot_root(new_root, old_root) os.chdir('/') linux.umount2('/old_root', linux.MNT_DETACH) # umount old root os.rmdir('/old_root') # rmdir the old_root dir os.execvp(command[0], command) @cli.command(context_settings=dict(ignore_unknown_options=True,)) @click.option('--image-name', '-i', help='Image name', default='ubuntu') @click.option('--image-dir', help='Images directory', default='/workshop/images') @click.option('--container-dir', help='Containers directory', default='/workshop/containers') @click.argument('Command', required=True, nargs=-1) def run(image_name, image_dir, container_dir, command): container_id = str(uuid.uuid4()) pid = os.fork() if pid == 0: # This is the child, we'll try to do some containment here try: contain(command, image_name, image_dir, container_id, container_dir) except Exception: traceback.print_exc() os._exit(1) # something went wrong in contain() # This is the parent, pid contains the PID of the forked process # wait for the forked child, fetch the exit status _, status = os.waitpid(pid, 0) print('{} exited with status {}'.format(pid, status)) if __name__ == '__main__': cli()
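A hedged sketch of the overlay mount that the TODOs in create_container_root above call for, written as if it ran inside that function where container_id and container_dir are in scope; the directory names and the pre-extracted image root are assumptions, and only the workshop's existing linux.mount wrapper and the hinted MS_NODEV flag are taken from the file.

# Sketch only: lowerdir is a hypothetical read-only, pre-extracted image rootfs.
lower_dir = '/workshop/images_rootfs/ubuntu'
upper_dir = _get_container_path(container_id, container_dir, 'cow_rw')      # copy-on-write layer
work_dir = _get_container_path(container_id, container_dir, 'cow_workdir')  # overlayfs workdir
mount_point = _get_container_path(container_id, container_dir, 'rootfs')
for d in (upper_dir, work_dir, mount_point):
    if not os.path.exists(d):
        os.makedirs(d)
linux.mount('overlay', mount_point, 'overlay', linux.MS_NODEV,
            'lowerdir={},upperdir={},workdir={}'.format(lower_dir, upper_dir, work_dir))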
mikekestemont/beckett
code/analysis.py
Python
mit
6372
0.007062
#!usr/bin/env python # -*- coding: utf-8! -*- from collections import Counter, OrderedDict import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy.cluster.hierarchy import ward, dendrogram from sklearn.decomposition import PCA from sklearn.metrics.pairwise import euclidean_distances from sklearn.cluster import AgglomerativeClustering from sklearn.metrics.pairwise import pairwise_distances from librosa.segment import agglomerative from HACluster import VNClusterer, Clusterer from sklearn.preprocessing import StandardScaler from ete3 import Tree, NodeStyle, TreeStyle, AttrFace, faces, TextFace class OrderedCounter(Counter, OrderedDict): 'Counter that remembers the order elements are first encountered' def __repr__(self): return '%s(%r)' % (self.__class__.__name__, OrderedDict(self)) def __reduce__(self): return self.__class__, (OrderedDict(self),) def pca_cluster(slice_matrix, slice_names, feature_names, prefix='en', nb_clusters=3): """ Run pca on matrix and visualize samples in 1st PCs, with word loadings projected on top. The colouring of the samples is provided by running a cluster analysis on the samples in these first dimensions. """ sns.set_style('dark') sns.plt.rcParams['axes.linewidth'] = 0.2 fig, ax1 = sns.plt.subplots() slice_matrix = StandardScaler().fit_transform(slice_matrix) pca = PCA(n_components=2) pca_matrix = pca.fit_transform(slice_matrix) pca_loadings = pca.components_.transpose() # first plot slices: x1, x2 = pca_matrix[:,0], pca_matrix[:,1] ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none') # clustering on top (for colouring): clustering = AgglomerativeClustering(linkage='ward', affinity='euclidean', n_clusters=nb_clusters) clustering.fit(pca_matrix) # add slice names: for x, y, name, cluster_label in zip(x1, x2, slice_names, clustering.labels_): ax1.text(x, y, name.split('_')[0][:3], ha='center', va="center",
color=plt.cm.spectral(cluster_label / 10.), fontdict={'family': 'Arial', 'size': 10}) # now loadings on twin axis: ax2 = ax1.twinx().twiny() l1, l2 = pca_loadings[:,0], pca_loadings[:,1] ax2.scatter(l1, l2, 100, edgecolors='none', face
colors='none'); for x, y, l in zip(l1, l2, feature_names): ax2.text(x, y, l ,ha='center', va="center", size=8, color="darkgrey", fontdict={'family': 'Arial', 'size': 9}) # control aesthetics: ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_xticklabels([]) ax1.set_xticks([]) ax1.set_yticklabels([]) ax1.set_yticks([]) ax2.set_xticklabels([]) ax2.set_xticks([]) ax2.set_yticklabels([]) ax2.set_yticks([]) sns.plt.tight_layout() sns.plt.savefig('../outputs/'+prefix+'_pca.pdf', bbox_inches=0) plt.clf() def natural_cluster(slice_matrix, slice_names, prefix='en'): """ Perform plain cluster analysis on sample matrix, without taking into account the chronology of the corpus. """ slice_matrix = StandardScaler().fit_transform(slice_matrix) dist_matrix = pairwise_distances(slice_matrix, metric='euclidean') clusterer = Clusterer(dist_matrix, linkage='ward') clusterer.cluster(verbose=0) short_names = [l.split('_')[0][:5]+l.split('_')[1] for l in slice_names] tree = clusterer.dendrogram.ete_tree(short_names) tree.write(outfile='../outputs/'+prefix+'_natural_clustering.newick') def vnc_cluster(slice_matrix, slice_names, prefix='en'): slice_matrix = StandardScaler().fit_transform(slice_matrix) dist_matrix = pairwise_distances(slice_matrix, metric='euclidean') clusterer = VNClusterer(dist_matrix, linkage='ward') clusterer.cluster(verbose=0) short_names = [l.split('_')[0][:5]+l.split('_')[1] for l in slice_names] t = clusterer.dendrogram.ete_tree(short_names) t.write(outfile='../outputs/'+prefix+"_vnc_clustering.newick") def segment_cluster(slice_matrix, slice_names, nb_segments): slice_matrix = StandardScaler().fit_transform(slice_matrix) slice_matrix = np.asarray(slice_matrix).transpose() # librosa assumes that data[1] = time axis segment_starts = agglomerative(data=slice_matrix, k=nb_segments) break_points = [] for i in segment_starts: if i > 0: # skip first one, since it's always a segm start! 
break_points.append(slice_names[i]) return(break_points) def bootstrap_segmentation(n_iter, nb_mfw_sampled, corpus_matrix, slice_names, prefix='en', nb_segments=3, random_state=2015): np.random.seed(random_state) corpus_matrix = np.asarray(corpus_matrix) sample_cnts = OrderedCounter() for sn in slice_names: sample_cnts[sn] = [] for i in range(nb_segments): sample_cnts[sn].append(0) for nb in range(n_iter): print('===============\niteration:', nb+1) # sample a subset of the features in our matrix: rnd_indices = np.random.randint(low=0, high=corpus_matrix.shape[1], size=nb_mfw_sampled) sampled_matrix = corpus_matrix[:,rnd_indices] # get which breaks are selected and adjust the cnts: selected_breaks = segment_cluster(sampled_matrix, slice_names, nb_segments=nb_segments) for i, break_ in enumerate(selected_breaks): sample_cnts[break_][i] += 1 plt.rcParams['font.family'] = 'arial' plt.rcParams['font.size'] = 8 plt.clf() plt.figure(figsize=(10,20)) sample_names, breakpoints_cnts = zip(*sample_cnts.items()) pos = [i for i, n in enumerate(sample_names)][::-1] # reverse for legibility plt.yticks(pos, [n[:3].replace('_', '') if n.endswith(('_1', '_0')) else ' ' for n in sample_names]) axes = plt.gca() axes.set_xlim([0,n_iter]) colors = sns.color_palette('hls', nb_segments) for i in range(nb_segments-1): cnts = [c[i] for c in breakpoints_cnts] plt.barh(pos, cnts, align='center', color=colors[i], linewidth=0, label="Boundary "+str(i+1)) plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='on') plt.tick_params(axis='x', which='both', top='off') plt.legend() plt.savefig('../outputs/'+prefix+'_bootstrap_segment'+str(nb_segments)+'.pdf')
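analysis.py above is a library of entry points rather than a script; the following is a hypothetical driver showing how its functions are meant to be combined. The matrix, slice names and feature names are random placeholders, and the hard-coded '../outputs/' directory is assumed to exist.

import numpy as np

# Toy stand-ins for the real slice-by-feature frequency matrix.
slice_matrix = np.random.rand(20, 50)                      # 20 text slices x 50 features
slice_names = ['work%02d_%d' % (i // 2, i % 2) for i in range(20)]
feature_names = ['w%d' % i for i in range(50)]

pca_cluster(slice_matrix, slice_names, feature_names, prefix='en', nb_clusters=3)
vnc_cluster(slice_matrix, slice_names, prefix='en')
print(segment_cluster(slice_matrix, slice_names, nb_segments=3))
bootstrap_segmentation(100, 25, slice_matrix, slice_names, prefix='en', nb_segments=3)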
jameslyons/pycipher
tests/test_vigenere.py
Python
mit
1247
0.008019
from pycipher import Vigenere import unittest class TestVigenere(unittest.TestCase): def test_encipher(self): keys = ('GERMAN', 'CIPHERS') plaintext = ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz', 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz') ciphertext = ('gftpesmlzvkysrfbqeyxlhwkedrncqkjxtiwqpdzocwvjfuicbpl', 'cjrkiwyjqyrpdfqxfywkmxemfdrteltmkyalsatrfhszhaymozgo') for i,key in enumerate(keys): enc = Vigenere(key).encipher(plaintext[i]) self.assertEqual(enc.upper(), ciphertext[i].upper()) def test_decipher(self): keys = ('GERMAN', 'CIPHERS') plaintext= ('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyzabcde
fghijklmnopqrstuvwxyz') ciphertext = ('gftpesmlzvkysrfbqeyxlhwkedrncqkjxtiwqpdzocwvjfuicbpl', 'cjrkiwyjqyrpdfqxfywkmxemfdrteltmkyalsatrfhszhaymozgo') for i,key in enumerate(keys): dec = Vigenere(key).decipher(ciphertext[i]) self.assertEqual(dec.upper(), plaintext[i].upper()) if __name__ == '__main__': unittest.main()
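For reference, the expected ciphertexts above follow the classical Vigenère rule c_i = (p_i + k_(i mod |k|)) mod 26. A small standalone sketch of that rule, independent of pycipher's implementation, checked against the opening of the 'GERMAN' test vector:

def vigenere_encipher(plaintext, key):
    # Shift each plaintext letter by the matching key letter, modulo 26.
    out = []
    for i, ch in enumerate(plaintext.upper()):
        shift = ord(key[i % len(key)].upper()) - ord('A')
        out.append(chr((ord(ch) - ord('A') + shift) % 26 + ord('A')))
    return ''.join(out)

# 'abcdef' under key 'GERMAN' -> 'GFTPES', matching test_encipher above.
assert vigenere_encipher('abcdef', 'GERMAN') == 'GFTPES'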
sio2project/oioioi
oioioi/participants/utils.py
Python
gpl-3.0
6045
0.000496
import unicodecsv from django.contrib.auth.models import User from django.http import HttpResponse from django.utils.encoding import force_text from six.moves import map from oioioi.base.permissions import make_request_condition from oioioi.base.utils import request_cached from oioioi.participants.controllers import ParticipantsController from oioioi.participants.models import Participant def is_contest_with_participants(contest): rcontroller = contest.controller.registration_controller() return isinstance(rcontroller, ParticipantsController) def is_onsite_contest(contest): if not is_contest_with_participants(contest): return False from oioioi.participants.admin import OnsiteRegistrationParticipantAdmin rcontroller = contest.controller.registration_controller() padmin = rcontroller.participant_admin return padmin and issubclass(padmin, OnsiteRegistrationParticipantAdmin) @make_request_condition def contest_has_participants(request): return is_contest_with_participants(request.contest) @make_request_condition def has_participants_admin(request): rcontroller = request.contest.controller.registration_controller() return getattr(rcontroller, 'participant_admin', None) is not None @make_request_condition def contest_is_onsite(request): return is_onsite_contest(request.contest) @request_cached def get_participant(request): try: return Participant.objects.get(contest=request.contest, user=request.user) except Participant.DoesNotExist: return None @make_request_condition @request_cached def can_register(request): if get_participant(request) is not None: return False rcontroller = request.contest.controller.registration_controller() return rcontroller.can_register(request) @make_request_condition @request_cached def can_edit_registration(request): participant = get_participant(request) if participant is None: return False rcontroller = request.contest.controller.registration_controller() return rcontroller.can_edit_registration(request, participant) @make_request_condition @request_cached def can_unregister(request): participant = get_participant(request) if participant is None: return False rcontroller = request.contest.controller.registration_controller() return rcontroller.can_unregister(request, participant) @make_request_condition @request_cached def is_participant(request): rcontroller = request.contest.controller.registration_controller() qs = User.objects.filter(id=request.user.id) return rcontroller.filter_participants(qs).exists() def _fold_registration_models_tree(object): """Function for serialize_participants_data. 
Walks over model of the object, gets models related to the model and lists all their fields.""" result = [] objects_used = [object] # https://docs.djangoproject.com/en/1.9/ref/models/meta/#migrating-old-meta-api def get_all_related_objects(_meta): return [ f for f in _meta.get_fields() if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete ] objs = [ getattr(object, rel.get_accessor_name()) for rel in get_all_related_objects(object._meta) if hasattr(object, rel.get_accessor_name()) ] while objs: current = objs.pop(0) if current is None: continue objects_used.append(current) for field in current._meta.fields: if ( field.remote_field is not None and getattr(current, field.name) not in objects_used ): objs.append(getattr(current, field.name)) for obj in objects_used: for field in obj._meta.fields: if not field.auto_created: if field.remote_field is None: result += [(obj, field)] return result def serialize_participants_data(request, participants): """Serializes all personal data of participants to a table. :param participants: A QuerySet from table participants. """ if not participants.exists(): return {'no_participants': True} display_email = request.contest.controller.show_email_in_participants_data keys = ['username', 'user ID', 'first name', 'last name'] + ( ['email address'] if display_email else [] ) def key_name(attr): (obj, field) = attr return str(obj.__class__.__name__) + ": " + field.verbose_name.title() set_of_keys = set(keys) for participant in participants: for key in map(key_name, _fold_registration_models_tree(participant)): if key not in set_of_keys: set_of_keys.add(key) keys.append(key) def key_value(attr): (obj, field) = attr return (key_name((obj, field)), field.value_to_string(obj)) data = [] for participant in participants: values = dict(list(map(key_value, _fold_registration_models_tree(participant)))) values['username'] = participant.user.username values['user ID'] = participant.user.id values['first name'] = participant.user.first_name values['last name'] = participant.user
.last_name if display_email: value
s['email address'] = participant.user.email data.append([values.get(key, '') for key in keys]) return {'keys': keys, 'data': data} def render_participants_data_csv(request, participants, name): data = serialize_participants_data(request, participants) response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % ( name, "personal-data", ) if 'no_participants' not in data: writer = unicodecsv.writer(response) writer.writerow(list(map(force_text, data['keys']))) for row in data['data']: writer.writerow(list(map(force_text, row))) return response
justyns/home-assistant
homeassistant/components/light/tellstick.py
Python
mit
3211
0
""" Support for Tellstick lights. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.tellstick/ """ from homeassistant.components import tellstick from homeassistant.components.light import ATTR_BRIGHTNESS, Light from homeassistant.components.tellstick import (DEFAULT_SIGNAL_REPETITIONS, ATTR_DISCOVER_DEVICES, ATTR_DISCOVER_CONFIG) # pylint: disable=unused-argument def setup_platform(hass, config, add_devices, discovery_info=None): """Setup Tellstick lights.""" if (discovery_info is None or discovery_info[ATTR_DISCOVER_DEVICES] is None or tellstick.TELLCORE_REGISTRY is None): return signal_repetitions = discovery_info.get(ATTR_DISCOVER_CONFIG, DEFAULT_SIGNAL_REPETITIONS) add_devices(TellstickLight( tellstick.TELLCORE_REGISTRY.get_device(switch_id), signal_repetitions) for switch_id in discovery_info[ATTR_DISCOVER_DEVICES]) class TellstickLight(tellstick.TellstickDevice, Light): """Representation of a Tellstick light.""" def __init__(self, tellstick_device, signal_repetitions): """Initialize the light.""" self._brightness = 255 tellstick.TellstickDevice.__init__(self, tellstick_device, signal_repetitions) @property def is_on(self): """Return true if switch is on.""" return self._state @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._brightness def set_tellstick_state(self, last_command_sent, last_data_sent): """Update the internal representation of the switch.""" from tellcore.constants import TELLSTICK_TURNON, TELLSTICK_DIM if last_command_sent == TELLSTICK_DIM: if last_data_sent is not None: self._brightness = int(last_data_sent) self._state = self._brightness > 0 else: self._state = last_command_sent == TELLSTICK_TURNON def _send_tellstick_command(self, command, data): """Handle the turn_on / turn_off commands.""" from tellcore.constants import (TELLSTICK_TURNOFF, TELLSTICK_DIM) if command == TELLSTICK_TURNOFF: self.tellstick_device.turn_off() elif command == TELLSTICK_DIM:
self.tellstick_device.dim(self._brightness) else: raise NotImplementedError( "Command not implemen
ted: {}".format(command)) def turn_on(self, **kwargs): """Turn the switch on.""" from tellcore.constants import TELLSTICK_DIM brightness = kwargs.get(ATTR_BRIGHTNESS) if brightness is not None: self._brightness = brightness self.call_tellstick(TELLSTICK_DIM, self._brightness) def turn_off(self, **kwargs): """Turn the switch off.""" from tellcore.constants import TELLSTICK_TURNOFF self.call_tellstick(TELLSTICK_TURNOFF)
pashinin-com/pashinin.com
src/core/models.py
Python
gpl-3.0
22154
0.000045
import json import hashlib from django.db import models from django.db.models import Count, Func from django.contrib.postgres.fields import ArrayField from django.contrib.auth.models import BaseUserManager, AbstractBaseUser from django.utils.translation import gettext_lazy as _ # from social.apps.django_app.default.models import UserSocialAuth from django.contrib.auth.models import Permission, Group, PermissionsMixin from django.db import transaction from random import randint from django.core.cache import cache from mptt.models import MPTTModel, TreeForeignKey from netfields import InetAddressField, NetManager from django_gravatar.helpers import get_gravatar_url from . import now # from lazysignup.utils import is_lazy_user # Travis payload format: # https://docs.travis-ci.com/user/notifications#Webhooks-Delivery-Format class SiteUpdate(models.Model): started = models.DateTimeField( default=None, null=True, blank=True, db_index=True ) finished = models.DateTimeField( auto_now_add=True, db_index=True, null=True, blank=True ) sha1 = models.CharField(max_length=40, editable=False, unique=True) commit_time = models.DateTimeField( db_index=True, null=True, blank=True ) commit_message = models.CharField( max_length=150, editable=False, null=True, blank=True ) travis_raw = models.TextField(null=True, blank=True) log = models.TextField(null=True, blank=True) class Meta: verbose_name = _("Site update") verbose_name_plural = _("Site updates") @property def travis_raw_pretty(self): if self.travis_raw: parsed = json.loads(self.travis_raw) return json.dumps(parsed, indent=4, sort_keys=True) else: return "" @property def length(self): if self.finished and self.started: return self.finished-self.started else: return None def __str__(self): return self.sha1 class AddedChanged(models.Model): added = models.DateTimeField( auto_now_add=True, db_index=True, # default=now, ) changed = models.DateTimeField( auto_now=True, db_index=True, # default=now ) # , editable=False class Meta: abstract = True class UserManager(BaseUserManager): def create_user(self, email, username=None, password=None): if not email: raise ValueError('Users must have an email address') user = self.model( email=self.normalize_email(email), username=username, is_staff=False, is_active=True, is_superuser=False, last_login=now(), date_joined=now() ) user.set_password(password) user.save(using=self._db) return user def random(self): """TODO""" # there can be deleted items with transaction.atomic(): count = self.aggregate(count=Count('id'))['count'] random_index = randint(0, count - 1) return self.all()[random_index] def create_superuser
(self, email, username, password): user = self.create_user(email, username, password) user.is_active = True user.is_staff = True user.is_superuser = True user.save(using=self._db) return user class User(
AbstractBaseUser, PermissionsMixin): objects = UserManager() USERNAME_FIELD = 'email' email = models.EmailField( verbose_name='Email', max_length=255, unique=True, db_index=True, blank=True, null=True, default=None, ) username = models.CharField( max_length=200, db_index=True, # unique=True, default='', blank=True, null=True, help_text=_("This is an unique identifier, not actual username. Can be a session \ key for temporary users") ) # is_superuser = models.BooleanField(default=False) is_staff = models.BooleanField( default=False, help_text=_("Designates whether this user can access the admin site.") ) is_active = models.BooleanField(default=True) date_joined = models.DateTimeField(auto_now_add=True, db_index=True) first_name = models.CharField( max_length=200, blank=True, null=True, ) last_name = models.CharField( max_length=200, blank=True, null=True, ) date_last_pass_sent = models.DateTimeField(null=True) skype = models.CharField(max_length=200, blank=True, null=True) discord = models.CharField(max_length=200, blank=True, null=True) phone = models.CharField(max_length=200, blank=True, null=True) city = models.CharField(max_length=200, blank=True, null=True) browser_on_creation = models.CharField( max_length=300, db_index=True, default=None, blank=True, null=True, help_text=_("Browser string used when this user was created") ) created_from_ip = models.GenericIPAddressField(blank=True, null=True) timezone_str = models.CharField( max_length=50, db_index=True, default='UTC', ) # avatar = models.ForeignKey( # 'images.Image', # null=True, # blank=True, # # help_text=_("Avatar image") # ) permissions = models.ManyToManyField( Permission, related_name="permissions", blank=True ) groups = models.ManyToManyField( Group, related_name="groups", blank=True ) telegram_chat_id = models.IntegerField( blank=True, null=True, ) class Meta: verbose_name = _("User") verbose_name_plural = _("Users") def gravatar(self, size_in_px=25): """Return authorized social accounts""" return get_gravatar_url(self.email, size=size_in_px) # @property # def social_accounts(self): # """Return authorized social accounts""" # return UserSocialAuth.objects.filter(user=self) @property def is_lazy(self): return True # return is_lazy_user(self) def get_full_name(self): "Used in Admin. Dajngo wants this to be defined." return "{} {}".format(self.first_name, self.last_name) def get_short_name(self): "Used in Admin. Dajngo wants this to be defined." return self.email def __str__(self): # if self.is_lazy: # return "{}".format(_('Anonymous')) if self.first_name: return self.first_name elif self.email: return self.email else: return "User {}".format(self.pk) # pip install django-mptt class Tree(MPTTModel): parent = TreeForeignKey( 'self', default=None, null=True, blank=True, db_index=True, # related_name="%(app_label)s_%(class)s_parent", # related_name="%(app_label)s_%(class)s_children", related_name='children', verbose_name=_("Parent element"), on_delete=models.SET_NULL, ) class Meta: abstract = True class Comment(Tree): author = models.ForeignKey( 'core.User', default=None, null=True, blank=True, on_delete=models.SET_NULL, ) src = models.TextField() class LoginAttempt(models.Model): ''' A login attempt record (both successful and not). If user field is set then login was successful. Instead login and password fields are set. 
''' # https://docs.python.org/3/library/ipaddress.html # inet = InetAddressField(primary_key=True) ip = InetAddressField() login = models.CharField( max_length=260, null=True, blank=True, ) password = models.CharField( max_length=260, null=True, blank=True, ) user = models.ForeignKey( 'core.User', default=None, null=True, blank=True, on_delete=models.SET_NULL, ) time = models.DateTimeField( auto_now_add=True, db
openseat/ipylayoutwidgets
ipylayoutwidgets/widgets/__init__.py
Python
bsd-3-clause
88
0.011364
from .widg
et_svg_layout import SVGLayoutBox from .widget_fullscreen import Fullscr
eenBox
cuongnb14/bilyric
config/wsgi.py
Python
mit
1707
0
""" WSGI config for Bilyric project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace
the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os from django.core.wsgi import get_wsgi_application if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production': from raven.contrib.django.raven_compa
t.middleware.wsgi import Sentry # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. application = get_wsgi_application() if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production': application = Sentry(application) # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
capitalone/cloud-custodian
tests/test_ebs.py
Python
apache-2.0
25361
0.000789
# Copyright 2016-2017 Capital One Services, LLC # Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 import logging from botocore.exceptions import ClientError import mock from c7n.exceptions import PolicyValidationError from c7n.executor import MainThreadExecutor from c7n.resources.aws import shape_validate from c7n.resources.ebs import ( CopyInstanceTags, EncryptInstanceVolumes, CopySnapshot, Delete, ErrorHandler, SnapshotQueryParser as QueryParser ) from .common import BaseTest class SnapshotQueryParse(BaseTest): def test_query(self): qfilters = [ {'Name': 'tag:Name', 'Values': ['Snapshot1']}, {'Name': 'status', 'Values': ['completed']}] self.assertEqual(qfilters, QueryParser.parse(qfilters)) def test_invalid_query(self): self.assertRaises( PolicyValidationError, QueryParser.parse, {}) self.assertRaises( PolicyValidationError, QueryParser.parse, [None]) self.assertRaises( PolicyValidationError, QueryParser.parse, [{'X': 1}]) self.assertRaises( PolicyValidationError, QueryParser.parse, [ {'Name': 'status', 'Values': 'completed'}]) self.assertRaises( PolicyValidationError, QueryParser.parse, [ {'Name': 'status', 'Values': ['Completed']}]) self.assertRaises( PolicyValidationError, QueryParser.parse, [ {'Name': 'snapshot-id', 'Values': [1]}]) class SnapshotErrorHandler(BaseTest): def test_tag_error(self): snaps = [{'SnapshotId': 'aa'}] error_response = { "Error": { "Message": "The snapshot 'aa' does not exist.", "Code": "InvalidSnapshot.NotFound", } } client = mock.MagicMock() client.create_tags.side_effect = ClientError(error_response, 'CreateTags') p = self.load_policy({ "name": "snap-copy", "resource": "ebs-snapshot", 'actions': [{'type': 'tag', 'tags': {'bar': 'foo'}}]}) tagger = p.resource_manager.actions[0] tagger.process_resource_set(client, snaps, [{'Key': 'bar', 'Value': 'foo'}]) client.create_tags.assert_called_once() def test_remove_snapshot(self): snaps = [{'SnapshotId': 'a'}, {'SnapshotId': 'b'}, {'SnapshotId': 'c'}] t1 = list(snaps) ErrorHandler.remove_snapshot('c', t1) self.assertEqual([t['SnapshotId'] for t in t1], ['a', 'b']) ErrorHandler.remove_snapshot('d', snaps) self.assertEqual(len(snaps), 3) def test_get_bad_snapshot_malformed(self): operation_name = "DescribeSnapshots" error_response = { "Error": { "Message": 'Invalid id: "snap-malformedsnap"', "Code": "InvalidSnapshotID.Malformed", } } e = ClientError(error_response, operation_name) snap = ErrorHandler.extract_bad_snapshot(e) self.assertEqual(snap, "snap-malformedsnap") def test_get_bad_snapshot_notfound(self): operation_name = "DescribeSnapshots" error_response = { "Error": { "Message": "The snapshot 'snap-notfound' does not exist.", "Code": "InvalidSnapshot.NotFound", } } e = ClientError(error_response, operation_name) snap = ErrorHandler.extract_bad_snapshot(e) self.assertEqual(snap, "snap-notfound") def test_get_bad_volume_malformed(self): operation_name = "DescribeVolumes" error_response = { "Error": { "Message": 'Invalid id: "vol-malformedvolume"', "Code": "InvalidVolumeID.Malformed", } } e = ClientError(error_response, operation_name) vol = ErrorHandler.extract_bad_volume(e) self.assertEqual(vol, "vol-malformedvolume") def test_get_bad_volume_notfound(self): operation_name = "DescribeVolumes" error_response = { "Error": { "Message": "The volume 'vol-notfound' does not exist.", "Code": "InvalidVolume.NotFound", } } e = ClientError(error_response, operation_name) vol = ErrorHandler.extract_bad_volume(e) self.assertEqual(vol, "vol-notfound") def 
test_snapshot_copy_related_tags_missing_volumes(self): factory = self.replay_flight_data( "test_ebs_snapshot_copy_related_tags_missing_volumes") p = self.load_policy( { "name": "copy-related-tags", "resource": "aws.ebs-snapshot", "filters": [{"tag:Test": "Test"}], "actions": [ { "type": "copy-related-tag", "resource": "ebs", "key": "VolumeId", "tags": "*" } ] }, session_factory=factory ) try: resources = p.run() except ClientError: # it should filter missing volume and not throw an error self.fail("This should have been handled in ErrorHandler.extract_bad_volume") self.assertEqual(len(resources), 1) try: factory().client("ec2").describe_volumes( VolumeIds=[resources[0]["VolumeId"]] ) except ClientError as e: # this should not filter missing volume and will throw an error msg = e.response["Error"]["Message"] err = e.response["Error"]["Code"] self.assertEqual(err, "InvalidVolume.NotFound") self.assertEqual(msg, f"The volume '{resources[0]['VolumeId']}' does not exist.") class SnapshotAccessTest(BaseTest): def test_snapshot_access(self): # pre conditions, 2 snapshots one shared to a separate account, and one # shared publicly. 2 non matching volumes, one not shared, one shared # explicitly to its own account. self.patch(CopySnapshot, "executor_factory", MainThreadExecutor) factory = self.replay_flight_data("test_ebs_cross_account") p = self.load_policy( { "name": "snap-copy", "resource": "ebs-snapshot", "filters": ["cross-account"], }, session_factory=factory, ) resources = p.run() self.assertEqual(len(resources), 2) self.assertEqual( {r["SnapshotId"]: r["c7n:CrossAccountViolations"] for r in resources}, {"snap-7f9496cf": ["619193117841"], "snap-af0eb71b": ["all"]}, ) class SnapshotDetachTest(BaseTest): def test_volume_detach(self): factory = self.replay_flight_data('test_ebs_detach') p = self.load_policy( { 'name': 'volume-detach', 'resource': 'ebs', 'filters': [{'VolumeId': 'vol-0850cf7c8e949c318'}], 'actions': [ { 'type': 'detach' } ] }, session_factory=factory) resources
= p.run() self.assertEqual(len(resources), 1) client = factory(region="us-east-1").client('ec2') volumelist = [] volumelist.append(resources[0]['VolumeId
']) response = client.describe_volumes(VolumeIds=volumelist) for resp in response['Volumes']: for attachment in resp['Attachments']: self.assertTrue(attachment['State'] == "detached" or attachment['State'] == "detaching") class SnapshotCopyTest(BaseTest): def test_snapshot_copy(self): self.patch(CopySnapshot, "executor_factory", MainThreadExecutor) self.change_environment(AWS_DEFAULT_REGION="us-west-2") factory = self.replay_flight_data("test_ebs_snapshot_copy") p = self.load_policy( { "name": "snap-copy", "resource": "ebs-snapsh
mtasic85/dockyard
host.py
Python
mit
5231
0.006117
# -*- coding: utf-8 -*- __all__ = ['host_blueprint'] from datetime import datetime, timedelta # dateutil from dateutil.parser import parse as dtparse # flask from flask import ( Flask, request, session, g, redirect, url_for, abort, render_template, flash, jsonify, Blueprint, abort, send_from_directory, current_app, ) # flask login from flask.ext.login import login_required, fresh_login_required, current_user # flask-wtf from flask.ext.wtf import Form from wtforms import validators from wtforms import TextField, PasswordField, SelectField, BooleanField from wtforms_html5 import EmailField # requests import requests from requests.auth import HTTPBasicAuth # model from model.db import db from model.db import object_to_dict, objects_to_list, update_object_with_dict from model.user import UserAccount, UserQuota from model.host import Host host_blueprint = Blueprint('host_blueprint', __name__) @host_blueprint.route('/hosts', methods=['GET']) @login_required def host_hosts(): username = current_user.username print 'host_hosts:', locals() # get user account properties user_account = UserAccount.query.filter_by(username=username).one() dct = object_to_dict(user_account) return render_template( 'host-hosts.html', **dct ) @host_blueprint.route('/hosts/all', methods=['POST']) @login_required def host_hosts_all(): username = current_user.username usertype = current_user.usertype print 'host_hosts_all:', locals() if usertype != 'super': data = {} return jsonify(data) hosts = Host.query.all() _hosts = objects_to_list(hosts) data = { 'hosts': _hosts, } return jsonify(data) @host_blueprint.route('/host/create', methods=['POST']) @login_required def host_create(): username = current_user.username usertype = current_user.usertype _host = request.json['host'] print 'host_add:', locals() if usertype != 'super': data = {} return jsonify(data) name = _host['name'] host = _host['host'] port = _host['port'] auth_username = _host['auth_username'] auth_password = _host['auth_password'] ram_capacity = _host['ram_capacity'] ram_reserved = _host['ram_reserved'] if '[' in name and '-' in name and ']' in name and \ '[' in host and '-' in host and ']' in host: _hosts = [] hosts = [] # name base/range s = name.find('[') e = name.find(']') name_base = name[:s] name_range = name[s + 1:e] name_range = name_range.strip(' ').strip() name_range = map(int, name_range.split('-')) name_range[1] += 1 # host base/range s = host.find('[') e = host.find(']') host_base = host[:s] host_range = host[s + 1:e] host_range = host_range.strip(' ').strip() host_range = map(int, host_range.split('-')) host_range[1] += 1 for i, j in zip(range(*name_range), range(*host_range)): __host = { 'name': '%s%i' % (name_base, i), 'host': '%s%i' % (host_base, j), 'port': port, 'auth_username': auth_username, 'auth_password': auth_password, 'ram_capacity': ram_capacity, 'ram_reserved': ram_reserved, } __host['created'] = __host['updated'] = datetime.utcnow() host = Host(**__host) db.session.add(host) hosts.append(host) db.session.commit() for host in hosts: __host = object_to_dict(host) _hosts.append(__host) data = { 'hosts': _hosts, } else: _host['created'] = _host['updated
'] = datetime.utcnow() host = Host(**_host) db.session.add(host) db.session.commit() _host = object_to_dict(host) data = { 'host': _host, } return jsonify(data) @host_blueprint.route('/host/update', methods=['POST']) @login_required def host_update(): username = current_user.username usertype = current_user.usertype _host = request.json['host'] print 'host_
update:', locals() if usertype != 'super': data = {} return jsonify(data) host = Host.query.get(_host['id']) assert host is not None _host['updated'] = datetime.utcnow() update_object_with_dict(host, _host) db.session.commit() _host = object_to_dict(host) data = { 'host': _host, } return jsonify(data) @host_blueprint.route('/host/remove', methods=['POST']) @login_required def host_remove(): username = current_user.username usertype = current_user.usertype id = request.json['id'] print 'host_remove:', locals() if usertype != 'super': data = {} return jsonify(data) host = Host.query.get(id) assert host is not None db.session.delete(host) db.session.commit() data = {} return jsonify(data)
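The bracketed-range branch in host_create above turns one payload into a batch of hosts; a small standalone illustration of the same slicing logic (the hostnames and addresses are made up):

def expand_range(pattern):
    # "node[1-3]" -> ['node1', 'node2', 'node3'], mirroring the parsing in host_create
    s, e = pattern.find('['), pattern.find(']')
    base = pattern[:s]
    start, stop = [int(x) for x in pattern[s + 1:e].split('-')]
    return ['%s%d' % (base, i) for i in range(start, stop + 1)]

pairs = list(zip(expand_range('node[1-3]'), expand_range('10.0.0.[5-7]')))
# [('node1', '10.0.0.5'), ('node2', '10.0.0.6'), ('node3', '10.0.0.7')]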
bbfamily/abu
abupy/TradeBu/ABuTradeProxy.py
Python
gpl-3.0
14496
0.001752
# -*- encoding:utf-8 -*- """ 交易执行代理模块 """ from __future__ import print_function from __future__ import absolute_import from __future__ impo
rt division from contextlib import contextmanager from functools import total_ordering from enum imp
ort Enum import numpy as np import pandas as pd from . import ABuTradeDrawer from . import ABuTradeExecute __author__ = '阿布' __weixin__ = 'abu_quant' class EOrderSameRule(Enum): """对order_pd中对order判断为是否相同使用的规则""" """order有相同的symbol和买入日期就认为是相同""" ORDER_SAME_BD = 0 """order有相同的symbol, 买入日期,和卖出日期,即不考虑价格,只要日期相同就相同""" ORDER_SAME_BSD = 1 """order有相同的symbol, 买入日期,相同的买入价格,即单子买入时刻都相同""" ORDER_SAME_BDP = 2 """order有相同的symbol, 买入日期, 买入价格, 并且相同的卖出日期和价格才认为是相同,即买入卖出时刻都相同""" ORDER_SAME_BSPD = 3 @total_ordering class AbuOrderPdProxy(object): """ 包装交易订单构成的pd.DataFrame对象,外部debug因子的交易结果,寻找交易策略的问题使用, 支持两个orders_pd的并集,交集,差集,类似set的操作,同时支持相等,不等,大于,小于 的比较操作,eg如下: orders_pd1 = AbuOrderPdProxy(orders_pd1) with orders_pd1.proxy_work(orders_pd2) as (order1, order2): a = order1 | order2 # 两个交易结果的并集 b = order1 & order2 # 两个交易结果的交集 c = order1 - order2 # 两个交易结果的差集(在order1中,但不在order2中) d = order2 - order1 # 两个交易结果的差集(在order2中,但不在order1中) eq = order1 == order2 # 两个交易结果是否相同 lg = order1 > order2 # order1唯一的交易数量是否大于order2 lt = order1 < order2 # order1唯一的交易数量是否小于order2 """ def __init__(self, orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD): """ 初始化函数需要pd.DataFrame对象,暂时未做类型检测 :param orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :param same_rule: order判断为是否相同使用的规则, 默认EOrderSameRule.ORDER_SAME_BSPD 即:order有相同的symbol和买入日期和相同的卖出日期和价格才认为是相同 """ # 需要copy因为会添加orders_pd的列属性等 self.orders_pd = orders_pd.copy() self.same_rule = same_rule # 并集, 交集, 差集运算结果存储 self.op_result = None self.last_op_metrics = {} @contextmanager def proxy_work(self, orders_pd): """ 传人需要比较的orders_pd,构造ABuOrderPdProxy对象,返回使用者, 对op_result进行统一分析 :param orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :return: """ # 运算集结果重置 self.op_result = None # 实例化比较的ABuOrderPdProxy对象 other = AbuOrderPdProxy(orders_pd) try: yield self, other finally: if isinstance(self.op_result, pd.DataFrame): # 如果有并集, 交集, 差集运算结果存储, from ..MetricsBu.ABuMetricsBase import AbuMetricsBase metrics = AbuMetricsBase(self.op_result, None, None, None) metrics.fit_metrics_order() self.last_op_metrics['win_rate'] = metrics.win_rate self.last_op_metrics['gains_mean'] = metrics.gains_mean self.last_op_metrics['losses_mean'] = metrics.losses_mean self.last_op_metrics['sum_profit'] = self.op_result['profit'].sum() self.last_op_metrics['sum_profit_cg'] = self.op_result['profit_cg'].sum() def __and__(self, other): """ & 操作符的重载,计算两个交易集的交集""" # self.op = 'intersection(order1 & order2)' self.op_result = intersection_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule) return self.op_result def __or__(self, other): """ | 操作符的重载,计算两个交易集的并集""" # self.op = 'union(order1 | order2)' self.op_result = union_in_2orders(self.orders_pd, other.orders_pd) return self.op_result def __sub__(self, other): """ - 操作符的重载,计算两个交易集的差集""" self.op_result = difference_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule) return self.op_result def __eq__(self, other): """ == 操作符的重载,计算两个交易集的是否相同""" return (self - other).empty and (other - self).empty def __gt__(self, other): """ > 操作符的重载,计算两个交易集的大小, 类被total_ordering装饰,可以支持lt等操作符""" unique_cnt = find_unique_group_symbol(self.orders_pd).shape[0] other_unique_cnt = find_unique_group_symbol(other.orders_pd).shape[0] return unique_cnt > other_unique_cnt def union_in_2orders(orders_pd, other_orders_pd): """ 并集:分析因子或者参数问题时使用,debug策略问题时筛选出两个orders_pd中所有不同的交易, 注意这里不认为在相同的交易日买入相同的股票,两笔交易就一样,这里只是两个orders_pd合并 后使用drop_duplicates做了去除完全一样的order,即结果为并集: orders_pd | cmp_orders_pd或orders_pd.union(cmp_orders_pd) :param orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :param other_orders_pd: 
回测结果生成的交易订单构成的pd.DataFrame对象 :return: orders_pd | cmp_orders_pd """ orders_pd = orders_pd.append(other_orders_pd) orders_pd = orders_pd.drop_duplicates() return orders_pd def _same_pd(order, other_orders_pd, same_rule): """ 根据same_rule的规则从orders_pd和other_orders_pd中返回相同的df :param order: orders_pd中的一行order记录数据 :param other_orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :param same_rule: order判断为是否相同使用的规则 :return: 从orders_pd和other_orders_pd中返回相同的df """ symbol = order.symbol buy_day = order['buy_date'] buy_price = order['buy_price'] sell_day = order['sell_date'] sell_price = order['sell_price'] if same_rule == EOrderSameRule.ORDER_SAME_BD: # 只根据买入时间和买入symbol确定是否相同,即认为在相同的交易日买入相同的股票,两笔交易就一样,忽略其它所有order中的因素 same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)] elif same_rule == EOrderSameRule.ORDER_SAME_BSD: # 根据买入时间,卖出时间和买入symbol确定是否相同 same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day) & (other_orders_pd['sell_date'] == sell_day)] elif same_rule == EOrderSameRule.ORDER_SAME_BDP: # 根据买入时间,买入价格和买入symbol确定是否相同 same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day) & (other_orders_pd['buy_price'] == buy_price)] elif same_rule == EOrderSameRule.ORDER_SAME_BSPD: # 根据买入时间,卖出时间, 买入价格和卖出价格和买入symbol确定是否相同 same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day) & (other_orders_pd['sell_date'] == sell_day) & (other_orders_pd['buy_price'] == buy_price) & (other_orders_pd['sell_price'] == sell_price)] else: raise TypeError('same_rule type is {}!!'.format(same_rule)) return same_pd def intersection_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD): """ 交集: 分析因子或者参数问题时使用,debug策略问题时筛选出两个orders_pd中相同的交易, 即结果为交集:orders_pd & cmp_orders_pd或orders_pd.intersection(cmp_orders_pd) :param orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :param other_orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :param same_rule: order判断为是否相同使用的规则, 默认EOrderSameRule.ORDER_SAME_BSPD 即:order有相同的symbol和买入日期和相同的卖出日期和价格才认为是相同 :return: orders_pd & cmp_orders_pd """ def _intersection(order): same_pd = _same_pd(order, other_orders_pd, same_rule) if same_pd.empty: # 如果是空,说明不相交 return False # 相交, intersection=1,是交集 return True orders_pd['intersection'] = orders_pd.apply(_intersection, axis=1) return orders_pd[orders_pd['intersection'] == 1] def difference_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD): """ 差集: 分析因子或者参数问题时使用,debug策略问题时筛选出两个orders_pd的不同交易, 注意返回的结果是存在orders_pd中的交易,但不在cmp_orders_pd中的交易,即结果 为差集:orders_pd - cmp_orders_pd或orders_pd.difference(cmp_orders_pd) :param orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :param other_orders_pd: 回测结果生成的交易订单构成的pd.DataFrame对象 :param same_rule: order判断为是否相同使用的规则, 默认EOrderSameRule.ORDER_SAME_BSPD 即:order有相同的symbol和买入日期和相同的卖出日期和价格才认为是相同 :return: orders_pd - cmp_orders_pd """ def _difference(order): same_pd = _same_pd(order, other_orders_pd, same_rule) if same
mozey/taskmage
taskmage/response.py
Python
mit
557
0.008977
from tabulate import tabulate class Response(): message = None; data = None; def print(self): if self.message: if isinstance(self.message, str):
print(self.message) elif isinstance
(self.message, list): for message in self.message: print("{}\n".format(message)) if (self.data): if len(self.data["rows"]) > 0: print(tabulate(self.data["rows"], headers=self.data["headers"])) else: print("Empty!")
funkybob/rattle
rattle/parser/__init__.py
Python
mit
1048
0
import rply from ..lexer import lexers __all__ = ('parsers',) class Parsers(object): def __init__(self): self._fpg = None self._fp = None self._spg = None self._sp = None @property def fpg(self): if self._fpg is None: self._fpg = rply.ParserGenerator( [rule.name for rule in lexers.flg.rules], precedence=[] ) return self._fpg @property def fp(self): if self._fp is None: self._fp = self.fpg.build() return self._fp @property def spg(self): if self._spg is None: self._spg = rply.ParserGenerator( [rule.name for rule in lexers.slg.rules]
, precedence=[] ) return self._spg @property def sp(self): if self._sp is None: self._sp = self.spg.build() return self._sp parsers = Parsers() # Load productions from .filter import fpg # noqa from .structure impo
rt spg # noqa
Ezhil-Language-Foundation/open-tamil
examples/classifier/modelprocess2.py
Python
mit
3817
0.001339
# -*- coding: utf-8 -*- # (C) 2017 Muthiah Annamalai # This file is part of open-tamil examples # This code is released under public domain import joblib # Ref API help from : https://scikit-learn.org import numpy as np import random import string import time from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report, confusion_matrix from sklearn.model_selection import train_test_split from sklearn.neural_network import MLPClassifier from sklearn.preprocessing import StandardScaler # project modules from classifier_eng_vs_ta import jaffna_transliterate from preprocess import Feature def data1(filename): x = np.loadtxt(open(filename, "r"), delimiter=",") y = np.ones(shape=(x.shape[0], 1)) return (x, y) def data0(filename): x = np.loadtxt(open(filename, "r"), delimiter=",") y = np.zeros(shape=(x.shape[0], 1)) return (x, y) DEBUG = False x1, y1 = data1("tamilvu_dictionary_words.txt.csv") x0, y0 = data0("english_dictionary_words.jaffna.csv") az_x0, az_y0 = data0("english_dictionary_words.azhagi.csv") cm_x0, cm_y0 = data0("english_dictionary_words.combinational.csv") x1 = x1.take(range(0, x0.shape[0]), axis=0) y1 = np.ones((x0.shape[0], 1)) ## Scale the data for the training X = np.concatenate((x0, x1), axis=0) Y = np.concatenate((y0, y1), axis=0) # Y = Y.take(range(0,X.shape[0]),axis=0).ravel() Y = Y.ravel() X_train, X_test, Y_train, Y_test = train_test_split(X, Y) scaler = StandardScaler() scaler.fit(X_train) joblib.dump(scaler, "test_scaler.pkl") # scaler Dump for webapps print("Size of Training set => %d" % X_train.shape[0]) print("Size of Test set => %d" % X_test.shape[0]) X_train = scaler.
transform(X_train) X_test = scaler.transform(X_test) ########### ## Build training set
for the model ## solver='sgd',activation='logistic', ## We have a 4-layer model nn = MLPClassifier(hidden_layer_sizes=(15, 15, 10, 5), activation='logistic', max_iter=100000, alpha=0.01, solver='lbfgs') # Try 1-layer simple model with logistic activation # nn = MLPClassifier( # hidden_layer_sizes=(8, 8, 7), solver="lbfgs" # ) # activation='logistic',max_iter=1000,early_stopping=True,solver='lbfgs') # max_iter=500,solver='sgd',activation='logistic') print(nn) nn.fit(X_train, Y_train) joblib.dump( nn, "nn-%s.pkl" % time.ctime() ) # change dump name to test_nn.pkl for webapps Y_pred = nn.predict(X_test) print(" accuracy => ", accuracy_score(Y_pred.ravel(), Y_test)) score = nn.score(X_test, Y_test) print("Score => ") print(score) print(confusion_matrix(Y_test, Y_pred.ravel())) print(classification_report(Y_test, Y_pred.ravel())) def process_word(s): if any([l in string.ascii_lowercase for l in s]): s = jaffna_transliterate(s) print(u"Transliterated to %s" % s) print(u"Checking in NN '%s'" % s) try: f = Feature.get(s) scaled_feature = scaler.transform(np.array(f.data()).reshape(1, -1)) y = nn.predict(scaled_feature) print(scaled_feature) print(y) if y.ravel() > 0: print(u"%s -> TAMIL world (most likely)" % s) else: print(u"%s -> ENG word (most likely)" % s) except Exception as ioe: print("SKIPPING => ", ioe.message) return for w in [ u"hello", u"ஆரொன்", u"உகந்த", u"கம்புயுடர்", u"கம்ப்யூட்டர்", u"பியூடிபுல்", "pupil", "beautiful", "summer", "sinful", "google", "facebook", "microsoft", "swift", ]: process_word(w) while True: s = input(u">> ").decode("utf-8") s = s.strip().lower() if s == "end": break if len(s) < 1: continue process_word(s)
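The scaler and the trained network are both persisted with joblib above so they can be reused outside this training run (the inline comments mention renaming the dumps to test_scaler.pkl / test_nn.pkl for webapps). A hypothetical loader along those lines; the file names and the classify helper are assumptions, not part of the original script:

import joblib
import numpy as np
from preprocess import Feature  # same feature extractor used during training above

scaler = joblib.load("test_scaler.pkl")
nn = joblib.load("test_nn.pkl")  # the timestamped dump, renamed as the comment above suggests

def classify(word):
    # Mirror the prediction path in process_word(): features -> scale -> predict.
    x = scaler.transform(np.array(Feature.get(word).data()).reshape(1, -1))
    return "TAMIL" if nn.predict(x).ravel() > 0 else "ENGLISH"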
sasha-gitg/python-aiplatform
google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py
Python
apache-2.0
29300
0.001365
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 # type: ignore from google.api_core import grpc_helpers_async # type: ignore from google.api_core import operations_v1 # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.aiplatform_v1beta1.types import study from google.cloud.aiplatform_v1beta1.types import study as gca_study from google.cloud.aiplatform_v1beta1.types import vizier_service from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO from .grpc import VizierServ
iceGrpcTransport class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): """gRPC AsyncIO backend transport for VizierService. Vertex Vizier API. Vizier service is a GCP service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learnin
g architectures. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "aiplatform.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: aio.Channel: A gRPC AsyncIO channel object. """ return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "aiplatform.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. 
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client = None if api_mtls_endpoint:
lfcnassif/MultiContentViewer
release/modules/ext/libreoffice/program/python-core-3.3.0/lib/urllib/response.py
Python
lgpl-3.0
3021
0.001324
"""Response classes used by urllib. The base class, addbase, defines a minimal file-like interface, including read() and readline(). The typical response object is an addinfourl instance, which defines an info() method that returns headers and a geturl() method that returns the url. """ class addbase(object): """Base class for addinfo and addclosehook.""" # XXX Add a method to expose the timeout on the underlying socket? def __init__(self, fp): # TODO(jhylton): Is there a better way to delegate using io? self.fp = fp self.read = self.fp.read self.readline = self.fp.readline # TODO(jhylton): Make sure an object with readlines() is also iterable if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines if hasattr(self.fp, "fileno"): self.fileno = self.fp.fileno else: self.fileno = lambda: None def __iter__(self): # Assigning `__iter__` to the instance doesn't work as intended # because the iter builtin does something like `cls.__iter__(obj)` # and thus fails to find the _bound_ method `obj.__iter__`. # Returning just `self.fp` works for built-in file objects but # might not work for general file-like objects. return iter(self.fp) def __repr__(self): return '<%s at %r whose fp = %r>' % (self.__class__.__name__, id(self), self.fp) def close(self): if self.fp: self.fp.close() self.fp = None self.read = None self.readline = None self.readlines = None self.fileno = None self.__iter__ = None self.__next__ = None def __enter__(self): if self.fp is None: raise ValueError("I/O operation on closed file") return self def __exit__(self, type, value, traceback): self.close() class addclosehook(addbase): """Class to add a close hook to an open file.""" def __init__(self, fp, closehook, *hookargs): addbase.__init__(self, fp) self.closehook = closehook self.hookargs = hookargs def close(self): if self.closehook: self.closehook(*self.hookargs) self.closehook = None self.hookargs = None addbase.close(self) class addinfo(addbase): """class to add an info() method to an open file.""" def __init__(self, fp, headers): addbase.__init__(self, fp) self.h
eaders = headers def info(self): return self.headers class addinfourl
(addbase): """class to add info() and geturl() methods to an open file.""" def __init__(self, fp, headers, url, code=None): addbase.__init__(self, fp) self.headers = headers self.url = url self.code = code def info(self): return self.headers def getcode(self): return self.code def geturl(self): return self.url
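The module docstring above calls addinfourl the typical response object; a tiny standalone illustration of constructing one around an in-memory stream (the header dict, URL and status code are arbitrary values):

import io

resp = addinfourl(io.BytesIO(b"hello"), {"Content-Type": "text/plain"},
                  "http://example.com/", code=200)
assert resp.read() == b"hello"
assert resp.geturl() == "http://example.com/"
assert resp.getcode() == 200
assert resp.info() == {"Content-Type": "text/plain"}
resp.close()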
rtfd/readthedocs.org
readthedocs/oauth/management/commands/reconnect_remoterepositories.py
Python
mit
4783
0.002091
import json from django.db.models import Q, Subquery from django.core.management.base import BaseCommand from readthedocs.oauth.models import RemoteRepository from readthedocs.oauth.services import registry from readthedocs.oauth.services.base import SyncServiceError from readthedocs.projects.models import Project from readthedocs.organizations.models import Organization class Command(BaseCommand): help = "Re-connect RemoteRepository to Project" def add_arguments(self, parser): parser.add_argument('organization', nargs='+', type=str) parser.add_argument( '--no-dry-run', action='store_true', default=False, help='Update database with the changes proposed.', ) # If owners does not have their RemoteRepository synced, it could # happen we don't find a matching Project (see --force-owners-social-resync) parser.add_argument( '--only-owners', action='store_true', default=False, help='Connect repositories only to organization owners.', ) parser.add_argument( '--force-owners-social-resync', action='store_true', default=False, help='Force to re-sync RemoteRepository for organization owners.', ) def _force_owners_social_resync(self, organization): for owner in organization.owners.all(): for service_cls in registry: for service in service_cls.for_user(owner): try: service.sync() except SyncServiceError: print(f'Service {service} failed while syncing. Skipping...') def _connect_repositories(self, organization, no_dry_run, only_owners): connected_projects = [] # TODO: consider using same login than RemoteRepository.matches method # https://github.com/readthedocs/readthedocs.org/blob/49b03f298b6105d755554f7dc7e97a3398f7066f/readthedocs/oauth/models.py#L185-L194 remote_query = ( Q(ssh_url__in=Subquery(organization.projects.values('repo'))) | Q(clone_url__in=Subquery(organization.projects.values('repo'))) ) for remote in RemoteRepository.objects.filter(remote_query).order_by('created'): admin = json.loads(remote.json).get('permissions', {}).get('admin') if only_owners and remote.users.first() not in organization.owners.all(): # Do not connect a RemoteRepository if the User is not owner of the organization continue if not admin: # Do not connect a RemoteRepository where the User is not admin of the repository continue if not organization.users.filter(username=remote.users.first().username).exists(): # Do not connect a RemoteRepository if the use does not belong to the organization continue # Projects matching # - RemoteRepository URL # - are under the Organization # - not connected to a RemoteRepository already # - was not connected previously by this call to the script projects = Project.objects.filter( Q(repo=remote.ssh_url) | Q(repo=remote.clone_url), organizations__in=[organization.pk], remote_repository__isnull=True ).exclude(slug__in=connected_projects) for project in projects: connected_projects.append(project.slug) if no_dry_run: remote.project = project remote.save() print(f'{project.slug: <40} {remote.pk: <10} {remote.html_url: <60} {remote.users.first().username: <20} {admin: <5}') # noqa print('Total:', len(connected_projects)) if not no_dry_run: print( 'Changes WERE NOT applied to the database. ' 'Run it with --no-dry-run to save the changes.' ) def handle(self, *args, **options): no_dry_run = options.get('no_dry_run') only_owners = options.get('only_owners') force_owners_social_resync = options.get('force_owners_social_resync') for organization in options.get('organization'): try:
organization = Organization.objects.get(slug=organization) if force_owners_social_resync: self._force_owners_soci
al_resync(organization) self._connect_repositories(organization, no_dry_run, only_owners) except Organization.DoesNotExist: print(f'Organization does not exist. organization={organization}')
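A minimal invocation sketch for the management command above. The command name and flags come from the entry itself; the organization slug and the Django shell context are placeholders for illustration only.

# Hypothetical invocation from a Django shell; 'my-org' is a placeholder slug.
# By default the command performs a dry run and only prints the proposed matches.
from django.core.management import call_command

call_command('reconnect_remoterepositories', 'my-org')

# Persist the RemoteRepository <-> Project links, restricted to organization owners.
call_command('reconnect_remoterepositories', 'my-org', no_dry_run=True, only_owners=True)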
grnet/synnefo
snf-cyclades-app/synnefo/db/migrations/old/0080_nics_to_ips.py
Python
gpl-3.0
20,524
0.008234
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." # Delete stale NICs orm.NetworkInterface.objects.filter(machine__deleted=True).delete() for nic in orm.NetworkInterface.objects.select_related('machine', 'network').all(): userid = nic.machine.userid nic.userid = userid nic.save() network = nic.network for attr in ["ipv4", "ipv6"]: address = getattr(nic, attr) if address: ipversion = 4 if attr == "ipv4" else 6 subnet = nic.network.subnets.get(ipversion=ipversion) orm.IPAddress.objects.create(network=network, subnet=subnet, nic=nic, userid=userid, address=address) def backwards(self, orm): "Write your backwards methods here." for ip in orm.IPAddress.objects.filter(deleted=False): nic = ip.nic attr = "ipv4" if nic.subnet.ipversion == 4 else "ipv6" setattr(nic, attr, ip.address) nic.save() models = { 'db.backend': { 'Meta': {'ordering': "['clustername']", 'object_name': 'Backend'}, 'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'disk_templates': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'T
rue'}), 'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_
length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}), 'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}) }, 'db.backendnetwork': { 'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'}, 'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}), 'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}), 'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}), 'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}), 'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'db.bridgepooltable': { 'Meta': {'object_name': 'BridgePoolTable'}, 'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}), 'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, 'db.flavor': { 'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'}, 'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'disk_template': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'db.floatingip': { 'Meta': {'object_name': 'FloatingIP'}, 'created': ('django.db.models.fields.DateTimeField', [], 
{'auto_now_add': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ipv4': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15', 'db_index': 'True'}), 'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'null': 'True', 'to': "orm['db.VirtualMachine']"}), 'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'to': "orm['db.Network']"}), 'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'floating_ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}), 'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, 'db.ipaddress': { 'Meta': {'unique_together': "(('network', 'address'),)", 'object_name': 'IPAddress'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'floating_ip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'network': ('django.db.models.fields.related.ForeignKey', [], {'rela
mharding01/herbpy
src/herbpy/action/grasping.py
Python
bsd-3-clause
9,013
0.005658
import logging, openravepy, prpy from prpy.action import ActionMethod from prpy.planning.base import PlanningError from contextlib import contextmanager logger = logging.getLogger('herbpy') @ActionMethod def Grasp(robot, obj, manip=None, preshape=[0., 0., 0., 0.], tsrlist=None, render=True, **kw_args): """ @param robot The robot performing the push grasp @param obj The object to push grasp @param manip The manipulator to perform the grasp with (if None active manipulator is used) @param preshape The grasp preshape for the hand @param tsrlist A list of TSRChain objects to use for planning to grasp pose (if None, the 'grasp' tsr from tsrlibrary is used) @param render Render tsr samples and push direction vectors during planning """ HerbGrasp(robot, obj, manip=manip, preshape=preshape, tsrlist=tsrlist, render=render) @ActionMethod def PushGrasp(robot, obj, push_distance=0.1, manip=None, preshape=[0., 0., 0., 0.], push_required=True, tsrlist=None, render=True, **kw_args): """ @param robot The robot performing the push grasp @param obj The object to push grasp @param distance The distance to push before grasping @param manip The manipulator to perform the grasp with (if None active manipulator is used) @param push_required If true, throw exception if a plan for the pushing movement cannot be found. If false, continue with grasp even if push cannot be executed. @param preshape The grasp preshape for the hand @param tsrlist A list of TSRChain objects to use for planning to grasp pose (if None, the 'grasp' tsr from tsrlibrary is used) @param render Render tsr samples and push direction vectors during planning """ if tsrlist is None: tsrlist = robot.tsrlibrary(obj, 'push_grasp', push_distance=push_distance) HerbGrasp(robot, obj, manip=manip, preshape=preshape, push_distance=push_distance, tsrlist=tsrlist, render=render) def HerbGrasp(robot, obj, push_distance=None, manip=None, preshape=[0., 0., 0., 0.], push_required=False, tsrlist=None, render=True, **kw_args): """ @param robot The robot performing the push grasp @param obj The object to push grasp @param distance The distance to push before grasping (if None, no pushing) @param manip The manipulator to perform the grasp with (if None active manipulator is used) @param preshape The grasp preshape for the hand @param push_required If true, throw exception if a plan for the pushing movement cannot be found. If false, continue with grasp even if push cannot be executed. 
(only used if distance is not None) @param render Render tsr samples and push direction vectors during planning """ if manip is None: with robot.GetEnv(): manip = robot.GetActiveManipulator() # Move the hand to the grasp preshape manip.hand.MoveHand(*preshape) # Get the grasp tsr if tsrlist is None: tsrlist = robot.tsrlibrary(obj, 'grasp') # Plan to the grasp with prpy.viz.RenderTSRList(tsrlist, robot.GetEnv(), render=render): manip.PlanToTSR(tsrlist) if push_distance is not None: ee_in_world = manip.GetEndEffectorTransform() push_direction = ee_in_world[:3,2] # Move the object into the hand env = robot.GetEnv() with env: obj_in_world = obj.GetTransform() # First move back until collision stepsize = 0.01 total_distance = 0.0 while not env.CheckCollision(robot, obj) and total_distance <= push_distance: obj_in_world[:3,3] -= stepsize*push_direction total_distance += stepsize obj.SetTransform(obj_in_world) # Then move forward until just out of collision stepsize = 0.001 while env.CheckCollision(robot, obj): obj_in_world[:3,3] += stepsize*push_direction obj.SetTransform(obj_in_world) # Manipulator must be active for grab to work properly p = openravepy.KinBody.SaveParameters with robot.CreateRobotStateSaver(p.ActiveManipulator): robot.SetActiveManipulator(manip) robot.Grab(obj) # Now execute the straight line movement with prpy.viz.RenderVector(ee_in_world[:3,3], push_direction, push_distance, robot.GetEnv(), render=render): try: with prpy.rave.Disabled(obj): manip.PlanToEndEffectorOffset(direction = push_direction, distance = push_distance, **kw_args) except PlanningError, e: if push_required: raise else: logger.warn('Could not find a plan for straight line push. Ignoring.') robot.Release(obj) # Now close the hand to grasp manip.hand.CloseHand() # Manipulator must be active for grab to work properly p = openravepy.KinBody.SaveParameters with robot.CreateRobotStateSaver(p.ActiveManipulator): robot.SetActiveManipulator(manip) robot.Grab(obj) @ActionMethod def Lift(robot, obj, distance=0.05, manip=None, render=True, **kw_args): """ @param robot The robot performing the push grasp @param obj The object to lift @param distance The distance to lift the cup @param manip The manipulator to perform the grasp with (if None active manipulator is used) @param render Render tsr samples and push direction vectors during planning """ if manip is None: with robot.GetEnv(): manip = robot.GetActiveManipulator() # Check for collision and disable anything in collision creport = openravepy.CollisionReport() disabled_objects = [] # Resolve inconsistencies in grabbed objects if robot.CheckSelfCollision(): grabbed_objs = robot.GetGrabbed() for obj in grabbed_objs
: robot.Rele
ase(obj) for obj in grabbed_objs: robot.Grab(obj) # Create list of any current collisions so those can be disabled while robot.GetEnv().CheckCollision(robot, creport): collision_obj = creport.plink2.GetParent() disabled_objects.append(collision_obj) collision_obj.Enable(False) for obj in disabled_objects: obj.Enable(True) # Perform the lift with prpy.rave.AllDisabled(robot.GetEnv(), disabled_objects): lift_direction = [0., 0., 1.] lift_distance = distance ee_in_world = manip.GetEndEffectorTransform() with prpy.viz.RenderVector(ee_in_world[:3,3], lift_direction, distance, robot.GetEnv(), render=render): manip.PlanToEndEffectorOffset(direction=lift_direction, distance=lift_distance, **kw_args) @ActionMethod def Place(robot, obj, on_obj, given_point_on=None, manip=None, render=True, **kw_args): """ Place an object onto another object This assumes the 'point_on' tsr is defined for the on_obj and the 'place' tsr is defined for obj @param robot The robot performing the push grasp @param obj The object to place @param on_obj The object to place obj on @param given_point_on 4x4 numpy array (pose matrix) "X"-marked location on on_obj, in on_obj's coordinates. @param manip The manipulator to perform the grasp with (if None active manipulator is used) @param render Render tsr samples and push direction vectors during planning """ if manip is None: with robot.GetEnv():
joereynolds/Mr-Figs
src/minigames/hunt/game.py
Python
gpl-3.0
2,622
0.002288
import pygame import src.graphics as graphics import src.colours as colours import src.config as config import src.scenes.scenebase as scene_base from src.minigames.hunt.input_handler import InputHandler from src.gui.clickable import Clickable from src.resolution_asset_sizer import ResolutionAssetSizer from src.tiled_map import TiledMap from src.game_object.deadly_area import DeadlyArea from src.minigames.hunt.player import Player from src.minigames.hunt.collectible import Collectible class Hunt(scene_base.SceneBase): """The Hunt minigame...pretty much snake""" def __init__(self, previous, current_stage=1): self.current_stage = current_stage self.file = './assets/game-data/levels/minigames/hunt/minigame-hunt-' + str(self.current_stage) + '.tmx' self.surface = graphics.get_window_surface() self.tiled_map = TiledMap(self.file, self.surface) self.sprites = self.tiled_map.sprites self.player = self.get_player() self.collectibles = pygame.sprite.Group([sprite for sprite in self.sprites if isinstance(sprite, Collectible)]) self.collideables = pygame.sprite.Group([sprite for sprite in self.sprites if isinstance(sprite, DeadlyArea)]) scene_base.SceneBase.__init__( self, InputHandler(self), graphics.get_controller() ) self.previous = previous self.width, self.height = pygame.display.get_window_size() def update(self, delta_time): self.sprites.update(delta_time, self.tiled_map) self.player.handle_collision(self.collectibles, self.collideables) if not self.player.alive(): self.reset() if self.has_completed_minigame(): self.previous.open_secured_door() self.switch_to_scene(self.previous) elif self.has_won(): self.next_stage() def has_won(self): has_no_enemies = True for sprite in self.sprites: if isinstance(sprite, Collectible): has_no_enemies
= False return has_no_enemies def has_completed_minigame(self):
return self.has_won() and self.current_stage == 3 def render(self): self.surface.fill(colours.RED) self.sprites.draw(self.surface) def get_player(self): for sprite in self.sprites: if isinstance(sprite, Player): return sprite def reset(self): self.__init__(self.previous, self.current_stage) def next_stage(self): self.current_stage += 1 self.__init__(self.previous, self.current_stage)
indico/indico
indico/modules/auth/blueprint.py
Python
mit
2,168
0.005996
# This file is part of Indico. # Copyright (C) 2002 - 2022 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from flask import request from indico.modules.auth.controllers import (RHAccounts, RHAdminImpersonate, RHLinkAccount, RHLogin, RHLoginForm, RHLogout, RHRegister, RHRemoveAccount, RHResetPassword) from indico.web.flask.util import make_compat_redirect_func from indico.web.flask.wrappers import IndicoBlueprint _bp = IndicoBlueprint('auth', __name__,
template_folder='templates', virtual_template_folder='auth') _bp.add_url_rule('/
login/', 'login', RHLogin, methods=('GET', 'POST')) _bp.add_url_rule('/login/<provider>/', 'login', RHLogin) _bp.add_url_rule('/login/<provider>/form', 'login_form', RHLoginForm) _bp.add_url_rule('/login/<provider>/link-account', 'link_account', RHLinkAccount, methods=('GET', 'POST')) _bp.add_url_rule('/logout/', 'logout', RHLogout) _bp.add_url_rule('/register/', 'register', RHRegister, methods=('GET', 'POST'), defaults={'provider': None}) _bp.add_url_rule('/register/<provider>', 'register', RHRegister, methods=('GET', 'POST')) _bp.add_url_rule('/reset-password/', 'resetpass', RHResetPassword, methods=('GET', 'POST')) _bp.add_url_rule('/admin/users/impersonate', 'admin_impersonate', RHAdminImpersonate, methods=('POST',)) with _bp.add_prefixed_rules('/user/<int:user_id>', '/user'): _bp.add_url_rule('/accounts/', 'accounts', RHAccounts, methods=('GET', 'POST')) _bp.add_url_rule('/accounts/<identity>/remove/', 'remove_account', RHRemoveAccount, methods=('POST',)) @_bp.url_defaults def _add_user_id(endpoint, values): if endpoint in {'auth.accounts', 'auth.remove_account'} and 'user_id' not in values: values['user_id'] = request.view_args.get('user_id') # Legacy URLs auth_compat_blueprint = _compat_bp = IndicoBlueprint('compat_auth', __name__) _compat_bp.add_url_rule('/user/login', 'login', make_compat_redirect_func(_bp, 'login')) _compat_bp.add_url_rule('/user/register', 'register', make_compat_redirect_func(_bp, 'register'))
DemocracyClub/EveryElection
every_election/apps/core/migrations/0001_initial.py
Python
bsd-3-clause
621
0
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations from django.contrib.auth.models import Group, User def add_moderator_group(apps, schema_editor): g = Group.objects.create(name="moderators") g.save() for user in User.objects.all(): # add any existing admin users # to the moderators group when we create it
if user.is_superuser: g.user_set.add(user) class Migration(migrations.Migration): dependencies = [("auth", "0008_alter_user_username_max_length")] operations = [migrations.RunPython(add_moderator
_group)]
mkolar/pyblish-kredenc
pyblish_kredenc/plugins/nuke/extract_scene_save.py
Python
lgpl-3.0
313
0
import
nuke import pyblish.api class ExtractSceneSave(pyblish.api.Extractor): """ """ hosts = ['nuke'] order = pyblish.api.Extractor.order - 0.45 families = ['scene'] label = 'Scene Save' def process(self, instance):
self.log.info('saving scene') nuke.scriptSave()
jkpr/pma-api
pma_api/response.py
Python
mit
3,061
0
"""Responses.""" from io import StringIO from csv import DictWriter from flask import Response, jsonify, make_response from .__version__ import __version__ class ApiResult: """A representation of a generic JSON API result.""" def __init__(self, data, metadata=None, **kwargs): """Store input arguments. Args: data (dict): A dictionary built up for the API to return metadata (dict): A dictionary of keys and values to add to the metadata field of the return object. """ self.data = data self.extra_metadata = metadata self.kwargs = kwargs def to_response(self): """Make a response from the data.""" metadata = self.metadata(self.extra_metadata) obj = { **self.data, **self.kwargs, 'metadata': metadata } return jsonify(obj) @staticmethod def metadata(extra_metadata=None): """Return metadata.""" from .models import SourceData obj = { 'version': __version__, 'datasetMetadata': [item.to_json() for item in SourceData.query.all()] } if extra_metadata: obj.update(extra_metadata) return obj class QuerySetApiResult(ApiResult): """A representation of a list of records (Python dictionaries).""" def __init__(self, record_list, return_format, metadata=None, **kwargs): """Store the list of records and the format.""" super().__init__(record_list, metadata, **kwargs) self.record_list = record_list self.return_format = return_format def to_response(self): """Convert the list of re
cords into a response.""" if self.return_format == 'csv' and self.record_list: return self.csv_response(self.record_list) elif self.return_f
ormat == 'csv': # and not self.record_list return make_response('', 204) # Default is JSON return self.json_response(self.record_list, self.extra_metadata, **self.kwargs) @staticmethod def csv_response(record_list): """CSV Response.""" string_io = StringIO() header = record_list[0].keys() writer = DictWriter(f=string_io, fieldnames=header) writer.writeheader() writer.writerows((item for item in record_list)) result = string_io.getvalue() return Response(result, mimetype='text/csv') @staticmethod def json_response(record_list, extra_metadata, **kwargs): """Convert a list of records into a JSON response.""" obj = { **kwargs, 'results': record_list, 'resultSize': len(record_list), 'metadata': ApiResult.metadata(extra_metadata) } return jsonify(obj) # TODO: (jef/jkp 2017-08-29) Add methods for: # * return warnings, errors # * return version number # * documentation # Needs: Decision on how these should be returned.
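A small usage sketch for the response classes above, assuming an active Flask application context and a database (both are needed by ApiResult.metadata and jsonify); the record list is made-up sample data.

# Illustrative only: build JSON and CSV responses from a list of records.
records = [
    {'indicator': 'cp_all', 'value': 34.9},
    {'indicator': 'cp_mar', 'value': 41.2},
]

json_result = QuerySetApiResult(records, 'json')
response = json_result.to_response()  # JSON body with 'results', 'resultSize' and 'metadata'

csv_result = QuerySetApiResult(records, 'csv')
csv_response = csv_result.to_response()  # text/csv built with csv.DictWriter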
akvo/akvo-rsr
akvo/iati/checks/fields/related_activities.py
Python
agpl-3.0
1,444
0.003463
# -*- coding: utf-8 -*- # Akvo RSR is covered by the GNU Affero General Public License. # See more details in the license.txt file located at the root folder of the Akvo RSR module. # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. def related_activities(project): """ Check if related project has an IATI identifier and relation. :param project: Project object :return: All checks passed boolean, [Check results] """ checks = [] all_c
hecks_passed = True related_projects_count = p
roject.related_projects.count() for rp in project.related_projects.prefetch_related('related_project').all(): if not (rp.related_project or rp.related_iati_id): all_checks_passed = False checks.append(('error', 'related project or IATI identifier not specified')) elif rp.related_project and not rp.related_project.iati_activity_id: all_checks_passed = False checks.append(('error', 'related project (id: %s) has no IATI identifier specified' % str(rp.related_project.pk))) if not rp.relation: all_checks_passed = False checks.append(('error', 'relation missing for related project')) if related_projects_count > 0 and all_checks_passed: checks.append(('success', 'has valid related project(s)')) return all_checks_passed, checks
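A hypothetical call to the check above; `project` stands in for an Akvo RSR Project instance whose related projects are already set up.

# Illustrative only: run the IATI related-activity checks and report the results.
all_checks_passed, checks = related_activities(project)
for level, message in checks:
    print('%s: %s' % (level, message))
if not all_checks_passed:
    print('related-activity checks failed')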
ict-felix/stack
modules/resource/orchestrator/src/core/utils/calls.py
Python
apache-2.0
6,238
0.00016
from formatting import print_call import credentials import os.path import re import xmlrpclib def _get_ch_params(): # Initialise variables when required from core.config import FullConfParser fcp = FullConfParser() username = fcp.get("auth.conf").get("certificates").get("username") ch_host = fcp.get("auth.conf").get("clearinghouse").get("host") ch_port = fcp.get("auth.conf").get("clearinghouse").get("port") ch_end = fcp.get("auth.conf").get("clearinghouse").get("endpoint") return (username, ch_host, ch_port, ch_end) def api_call(method_name, endpoint=None, params=[], username=None, verbose=False): user, _, _, ch_end = _get_ch_params() username = username or user endpoint = endpoint or ch_end key_path = "%s-key.pem" % username cert_path = "%s-cert.pem" % username res = ssl_call(method_name, params, endpoint, key_path=key_path, cert_path=cert_path) if verbose: print_call(method_name, params, res) return res.get("code", None), \ res.get("value", None), res.get("output", None) def ch_call(method_name, endpoint=None, params=[], username=None, verbose=False): user, ch_host, ch_port, ch_end = _get_ch_params() username = username or user endpoint = endpoint or ch_end key_path = "%s-key.pem" % username cert_path = "%s-cert.pem" % username res = ssl_call(method_name, params, endpoint, key_path=key_path, cert_path=cert_path, host=ch_host, port=ch_port) return res def handler_call(method_name, params=[], username=None, arg=[]): if username is None: user, _, _, _ = _get_ch_params() verbose = False if arg in ["-v", "--verbose"]: verbose = True return api_call(method_name, "/xmlrpc/geni/3/", params=params, username=username, verbose=verbose) class SafeTransportWithCert(xmlrpclib.SafeTransport): """ Helper class to force the right certificate for the transport class. """ def __init__(self, key_path, cert_path): # No super because of old-style class xmlrpclib.SafeTransport.__init__(self) sel
f._key_path = key_path self._cert_path = cert_path def make_connection(self, host): """ This method will automatically be called by the ServerProxy class when a transport channel is needed. """ host_with_cert = (host, {"key_file": self._key_path, "cert_file": self._cert_path}) # No super because of old-style class return xmlrpclib.SafeTransport.make_connection(self, host_with_cert) def ss
l_call(method_name, params, endpoint, key_path=None, cert_path=None, host=None, port=None): username, ch_host, ch_port, ch_end = _get_ch_params() key_path = key_path or ("%-key.pem" % username) cert_path = cert_path or ("%-cert.pem" % username) host = host or ch_host port = port or ch_port endpoint = endpoint or ch_end # Start logic creds_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../..", "cert")) if not os.path.isabs(key_path): key_path = os.path.join(creds_path, key_path) if not os.path.isabs(cert_path): cert_path = os.path.join(creds_path, cert_path) key_path = os.path.abspath(os.path.expanduser(key_path)) cert_path = os.path.abspath(os.path.expanduser(cert_path)) if not os.path.isfile(key_path) or not os.path.isfile(cert_path): raise RuntimeError("Key or cert file not found (%s, %s)" % (key_path, cert_path)) transport = SafeTransportWithCert(key_path, cert_path) if endpoint and len(endpoint): if endpoint[0] == "/": endpoint = endpoint[1:] proxy = xmlrpclib.ServerProxy("https://%s:%s/%s" % (host, str(port), endpoint), transport=transport) # return proxy.get_version() method = getattr(proxy, method_name) return method(*params) def getusercred(geni_api=3): """Retrieve your user credential. Useful for debugging. If you specify the -o option, the credential is saved to a file. If you specify --usercredfile: First, it tries to read the user cred from that file. Second, it saves the user cred to a file by that name (but with the appropriate extension) Otherwise, the filename is <username>-<framework nickname from config file>-usercred.[xml or json, depending on AM API version]. If you specify the --prefix option then that string starts the filename. If instead of the -o option, you supply the --tostdout option, then the usercred is printed to STDOUT. Otherwise the usercred is logged. The usercred is returned for use by calling scripts. e.g.: Get user credential, save to a file: omni.py -o getusercred Get user credential, save to a file with filename prefix mystuff: omni.py -o -p mystuff getusercred """ from core.config import FullConfParser fcp = FullConfParser() username = fcp.get("auth.conf").get("certificates").get("username") creds_path = os.path.normpath( os.path.join(os.path.dirname(__file__), "../../..", "cert")) cert_path = os.path.join(creds_path, "%s-cert.pem" % username) # Retrieve new credential by contacting with GCF CH try: user_cert = open(cert_path, "r").read() cred = ch_call("CreateUserCredential", params=[user_cert]) # Exception? -> Retrieve already existing credential from disk (CBAS) except: cred_path = os.path.join(creds_path, "%s-cred.xml" % username) cred = open(cred_path).read() if geni_api >= 3: if cred: cred = credentials.wrap_cred(cred) credxml = credentials.get_cred_xml(cred) # pull the username out of the cred # <owner_urn>urn:publicid:IDN+geni:gpo:gcf+user+alice</owner_urn> user = "" usermatch = re.search( r"\<owner_urn>urn:publicid:IDN\+.+\+user\+(\w+)\<\/owner_urn\>", credxml) if usermatch: user = usermatch.group(1) return ("Retrieved %s user credential" % user, cred)
XiaJieCom/change
Demo/days10/EchoClient.py
Python
lgpl-2.1
658
0.018237
from twisted.internet import reactor,protocol class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write("hello a ") def dataReceived(self, data): print('Server said:',data) self.transport.loseConnection
() def connectionLost(self, reason): print('connection lost') class EchoFactory(protocol.ClientFactory): protocol = EchoClient def clientConnectionFailed(self, connector, reason): print('Connection failed - goodbye!')
reactor.stop() def main(): f = EchoFactory() reactor.connectTCP('localhost',9090,f) reactor.run() if __name__ == '__main__': main()
FEniCS/dolfin
site-packages/dolfin/functions/__init__.py
Python
lgpl-3.0
827
0.001209
# -*- coding: utf-8 -*- """The function module of dolfin""" from dolfin.functions import multimeshfunction from dolfin.functions import functionspace from dolfin.functions import function from dolfin.functions import constant from dolfin.functions import expression from dolfin.functions import specialfunctions from .multimeshfunction import * from .functionspace import * from .function import * from .constant import * from .expression import * from .specialfunctions import * # NOTE: The automatic documentation sys
tem in DOLFIN requires that we _not_ define # NOTE: classes or functions within this file. Use separate modules for that # NOTE: purpose. __all__ = functionspace.__all__ + fu
nction.__all__ + constant.__all__ + \ expression.__all__ + specialfunctions.__all__ + \ multimeshfunction.__all__
jgowans/directionFinder_web
directionFinder_web/views/hello_json.py
Python
gpl-2.0
400
0.0125
from pyramid.view import view_config import logging @view_config(route_name='hello_json', renderer='json') def hello_json(request): logger = logging.getLogger(__nam
e__) logger.info("Got JSON from name: {n}".format(n = __name__))
request.session['counter'] = request.session.get('counter', 0) + 1 return { 'a': [1,2,request.session['counter']], 'b': ['x', 'y'], }
mondwan/ProjectRazzies
twitter_analyse.py
Python
mit
9,448
0.004128
#!/usr/bin/env python """ File: twitter_analyse.py Author: Me Email: 0 Github: 0 Description: Analyse tweets. For the detail, please refer to the document ```twitter_analyse.notes``` """ # System lib from __future__ import division import json import os from math import log import numpy # 3-rd party lib # import nltk from nltk.classify import NaiveBayesClassifier from textblob import TextBlob # Constants TWEET_DIR = os.path.join('.', 'twitter_data') OSCAR_DIR = os.path.join(TWEET_DIR, 'oscar') RAZZIES_DIR = os.path.join(TWEET_DIR, 'razzies') PREDICT_DIR = os.path.join(TWEET_DIR, 'proof') CANDIDATE_DIR = os.path.join(TWEET_DIR, 'candidates') # PREDICT_OSCAR_DIR = os.path.join(PREDICT_DIR, 'oscar') # PREDICT_RAZZIES_DIR = os.path.join(PREDICT_DIR, 'razzies') def attribute_to_characteristic(tweet): """ Extract attributes from a tweet and form a characteristic of a tweet @param tweet dict @return dict Charateristic of a tweet """ ret = {} text = tweet['text'] retweets = tweet['retweet_count'] favorites = tweet['favorite_count'] followers = tweet['author_followers'] friends = tweet['author_friends'] publishes = tweet['author_num_of_status'] blob = TextBlob(text) polarity = blob.sentiment.polarity ret['scaled_polarity'] = calculate_scaled_polarity( polarity, int(retweets), int(favorites), int(followers), int(friends), int(publishes) ) ret['retweets'] = retweets ret['favorites'] = favorites ret['followers'] = followers ret['friends'] = friends ret['publishes'] = publishes ret['polarity'] = polarity # print 'p=%.2f re=%d fav=%d, fol=%d, fd=%d, pub=%d' % ( # polarity, retweets, favorites, followers, friends, publishes # ) return ret def calculate_scaled_polarity( polarity, retweets, favorites, followers, friends, publishes): """ Return a scaled polarity for a tweet @param polarity float @param retweets int @param favorites int @param followers int @param friends int @param publishes int @return float """ # Avoid zero case and negative value retweets = retweets if retweets > 0 else 1 favorites = favorites if favorites > 0 else 1 followers = followers if followers > 0 else 1 friends = friends if friends > 0 else 1 publishes = publishes if publishes > 0 else 1 # Entropy ret = polarity * \ ( log(retweets, 2) + log(favorites, 2) + log(followers, 2) + log(friends, 2) + log(publishes, 2) ) return round(ret, 2) def tweets2film(tweet_characteristics): """ Aggreate tweet's characteristics to form a film's characteristics @param tweet_characteristics list of dict @return dict characteristics of a film """ ret = {} retweets_data = [] favorites_data = [] polarities_data = [] friends_data = [] followers_data = [] for t in tweet_characteristics: retweets_data.append(t['retweets']) favorites_data.append(t['favorites']) polarities_data.append(t['polarity']) friends_data.append(t['friends']) followers_data.append(t['followers']) retweets = numpy.array(retweets_data) favorites = numpy.array(favorites_data) polarities = numpy.array(polarities_data) friends = numpy.array(friends_data) followers = numpy.array(followers_data) for data_set in [ ('retweets', retweets), ('favorites', favorites), ('polarities', polarities), ('friends', friends),
('followers', followers) ]: data_name = data_set[0] data_list = data_set[1] print '|%s| sd: %f mean: %f min: %d max: %d' % ( data_name, round(data_list.std(), 2), round(numpy.average(data_list), 2),
data_list.min(), data_list.max(), ) # ret['avg_followers'] = round(numpy.average(followers_data), 2) # ret['avg_friends'] = round(numpy.average(friends_data), 2) ret['avg_polarity'] = round(numpy.average(polarities_data), 2) # ret['avg_retweet'] = round(numpy.average(retweets_data), 2) # ret['std_friends'] = round(friends.std(), 2) # ret['std_followers'] = round(followers.std(), 2) # ret['std_polarity'] = round(polarities.std(), 2) ret['std_retweet'] = round(retweets.std(), 2) # ret['log_friends'] = round(log(sum(friends_data)) / log(2), 2) # ret['log_followers'] = round(log(sum(followers_data)) / log(2), 2) ret['log_retweets'] = round(log(sum(retweets_data)) / log(2), 2) ret['log_favorites'] = round(log(sum(favorites_data)) / log(2), 2) return ret def construct_film_characteristic(film_name, tweet_characteristics): """ Construct featuresets for given parameters @param film_name string @param tweet_characteristics list of dict @return featuresets """ ret = {} # Analyze film's attributes ret['length_of_film'] = len(film_name) ret['number_of_words'] = len(film_name.split(' ')) # Analyze tweet's characteristics aggreated_characteristic = tweets2film(tweet_characteristics) # Merge 2 characteristics ret = dict(ret.items() + aggreated_characteristic.items()) return ret def predictCandidates(): list_of_files = os.listdir(CANDIDATE_DIR) for fn in list_of_files: path = os.path.join(CANDIDATE_DIR, fn) film_name = os.path.splitext(fn)[0] with open(path, 'r') as f: tweets = json.load(f) tweets = json.loads(tweets) tweet_characteristics = [] for tweet in tweets: # Per tweet analyze characteristic = attribute_to_characteristic(tweet) tweet_characteristics.append(characteristic) film_characteristic = construct_film_characteristic( film_name, tweet_characteristics ) result = classifier.classify(film_characteristic) print 'film: |%s| PREDICT: |%s|\n' % (film_name, result) features = [] for my_dir in [OSCAR_DIR, RAZZIES_DIR]: label = os.path.basename(my_dir) print "=========== Training {0} ============".format(label) for fn in os.listdir(my_dir): path = os.path.join(my_dir, fn) film_name = os.path.splitext(fn)[0] # print 'dir=%s, film_name=%s, path=%s' % (my_dir, film_name, path) with open(path, 'r') as f: tweets = json.load(f) tweets = json.loads(tweets) tweet_characteristics = [] for tweet in tweets: # Per tweet analyze characteristic = attribute_to_characteristic(tweet) tweet_characteristics.append(characteristic) try: film_characteristic = construct_film_characteristic( film_name, tweet_characteristics ) except Exception as e: print '{0}: {1}'.format(film_name, e) else: # print 'film: |%s|' % film_name # print film_characteristic feature = (film_characteristic, label) features.append(feature) # Train the classifier classifier = NaiveBayesClassifier.train(features) classifier.show_most_informative_features(10) # Predict the film report = {} predict_labels = ['oscar', 'razzies'] for predict_label in predict_labels: my_dir = os.path.join(PREDICT_DIR, predict_label) list_of_files = os.listdir(my_dir) report[predict_label] = { 'number_of_match': 0, 'number_of_films': len(list_of_files) } for fn in list_of_files: path = os.path.join(my_dir, fn) film_name = os.path.splitext(fn)[0] with open(path, 'r') as f: tweets = json.load(f) tweets = json.loads(tweets) tweet_characteristics = [] for tweet in tweets: # Per tweet analyze characteristic = attribute_to_characteristic(tweet) tweet_characteristics.append(characteristic) film_characteristic = construct_film_characteristic( f
dynaryu/inasafe
safe/gui/tools/minimum_needs/needs_manager_dialog.py
Python
gpl-3.0
33,744
0
# coding=utf-8 """ Impact Layer Merge Dialog. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Christian Christelis christian@kartoza.com' __revision__ = '$Format:%H$' __date__ = '27/10/2014' __copyright__ = ('Copyright 2012, Australia Indonesia Facility for ' 'Disaster Reduction') import os from os.path import expanduser, basename # This import must come first to force sip2 api # noinspection PyUnresolvedReferences # pylint: disable=unused-import from qgis.core import QGis # force sip2 api # noinspection PyPackageRequirements from PyQt4 import QtGui # noinspection PyPackageRequirements from PyQt4.QtGui import ( QDialog, QFileDialog, QGridLayout, QPushButton, QDialogButtonBox, QMessageBox, QIcon ) from PyQt4.QtCore import pyqtSignature, pyqtSlot from safe.common.resource_parameter import ResourceParameter from safe_extras.parameters.float_parameter import FloatParameter from safe_extras.parameters.qt_widgets.parameter_container import ( ParameterContainer) from safe_extras.parameters.parameter_exceptions import ( ValueOutOfBounds, InvalidMaximumError, InvalidMinimumError) from safe_extras.parameters.string_parameter import StringParameter from safe_extras.parameters.text_parameter import TextParameter from safe.utilities.resources import ( resources_path, get_ui_class, html_footer, html_header) from safe.messaging import styles from safe.gui.tools.minimum_needs.needs_profile import NeedsProfile from safe.utilities.i18n import tr from safe.gui.tools.help.needs_manager_help import needs_manager_helps INFO_STYLE = styles.INFO_STYLE FORM_CLASS = get_ui_class('needs_manager_dialog_base.ui') class NeedsManagerDialog(QDialog, FORM_CLASS): """Dialog class for the InaSAFE global minimum needs configuration. .. versionadded:: 2.2. """ def __init__(self, parent=None, dock=None): """Constructor for the minimum needs dialog. :param parent: Parent widget of this dialog. :type parent: QWidget :param dock: Dock widget instance that we can notify of changes. 
:type dock: Dock """ QtGui.QDialog.__init__(self, parent) self.setupUi(self) self.dock = dock # These are in the little button bar at the top # 'Remove resource' button # noinspection PyUnresolvedReferences self.remove_resource_button.clicked.connect(self.remove_resource) self.remove_resource_button.setIcon( QIcon(os.path.join( resources_path(), 'img', 'icons', 'remove.svg'))) # Add resource # noinspection PyUnresolvedReferences self.add_resource_button.clicked.connect(self.add_new_resource) self.add_resource_button.setIcon( QIcon(os.path.join( resources_path(), 'img', 'icons', 'add.svg'))) # Edit resource # noinspection PyUnresolvedReferences self.edit_resource_button.clicked.connect(self.edit_resource) self.edit_resource_button.setIcon( QIcon(os.path.join( resources_path(), 'img', 'icons', 'edit.svg'))) # Discard changes to a resource self.discard_changes_button = QPushButton(self.tr('Discard changes')) self.button_box.addButton( self.discard_changes_button, QDialogButtonBox.ActionRole) # noinspection PyUnresolvedReferences self.discard_changes_button.clicked.connect(self.discard_changes) # Save changes to a resource self.save_resource_button = QPushButton(self.tr('Save resource')) self.button_box.addButton( self.save_resource_button, QDialogButtonBox.ActionRole) # noinspection PyUnresolvedReferences self.save_resource_button.clicked.connect(self.save_resource) # Export profile button self.export_profile_button = QPushButton(self.tr('Export ...')) self.button_box.addButton( self.export_profile_button, QDialogButtonBox.ActionRole) # noinspection PyUnresolvedReferences self.export_profile_button.clicked.connect(self.export_profile) # Import profile button self.import_profile_button = QPushButton(self.tr('Import ...')) self.button_box.addButton( self.import_profile_button, QDialogButtonBox.ActionRole) # noinspection PyUnresolvedReferences self.import_profile_button.clicked.connect(self.import_profile) # New profile button self.new_profile_button = QPushButton(self.tr('New')) self.button_box.addButton( self.new_profile_button, QDialogButtonBox.ActionRole) # noinspection PyUnresolvedReferences self.new_profile_button.clicked.connect(self.new_profile) # Save profile button self.save_profile_button = QPushButton(self.tr('Save')) self.button_box.addButton( self.save_profile_button, QDialogButtonBox.ActionRole) # noinspection PyUnresolvedReferences self.save_profile_button.clicked.connect(self.save_profile) # 'Save as' profile button self.save_profile_as_button = QPushButton(self.tr('Save as')) self.button_box.addButton( self.save_profile_as_button, QDialogButtonBox.ActionRole) # noinspection PyUnresolvedReferences self.save_profile_as_button.clicked.connect( self.save_profile_as) # Set up things for context help self.help_button = self.button_box.button(QtGui.QDialogButtonBox.Help) # Allow toggling the help button self.help_button.setCheckable(True) self.help_button.toggled.connect(self.help_toggled) self.main_stacked_widget.setCurrentIndex(1) self.minimum_needs = NeedsProfile() self.edit_item = None # Remove profile button # noinspection PyUnresolvedReferences self.remove_profile_button.clicked.connect(self.remove_profile) # These are all buttons that will get hidden on context change # to the profile editing vie
w self.profile_editing_buttons = list() self.profile_editing_buttons.append(self.remove_resource_button) self.profile_editing_buttons.append(self.add_resource_button) self.profile_editing_buttons.append(self.edit_resource_button) self.profile_editing_bu
ttons.append(self.export_profile_button) self.profile_editing_buttons.append(self.import_profile_button) self.profile_editing_buttons.append(self.new_profile_button) self.profile_editing_buttons.append(self.save_profile_button) self.profile_editing_buttons.append(self.save_profile_as_button) # We also keep a list of all widgets to disable in context of resource # editing (not hidden, just disabled) self.profile_editing_widgets = self.profile_editing_buttons self.profile_editing_widgets.append(self.remove_profile_button) self.profile_editing_widgets.append(self.profile_combo) # These are all buttons that will get hidden on context change # to the resource editing view self.resource_editing_buttons = list() self.resource_editing_buttons.append(self.discard_changes_button) self.resource_editing_buttons.append(self.save_resource_button) for item in self.resource_editing_buttons: item.hide() self.load_profiles() # Next 2 lines fixes issues #1388 #1389 #1390 #1391 if self.profile_combo.count() > 0: self.select_profile(0) # initial sync profile_combo and resource list self.clear_resource_list() self.populate_resource_list() self.set_up_resource_parameters() # Only do this afterward load_profiles to avoid the resource list # being updated # noinspection PyUnresolvedReferences self.profile_combo.activated.connect(
stanley-cheung/grpc
src/python/grpcio_tests/tests_aio/unit/_common.py
Python
apache-2.0
3,617
0
# Copyright 2020 The gRPC Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by a
pplicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIO
NS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio from typing import AsyncIterable import grpc from grpc.aio._metadata import Metadata from grpc.aio._typing import MetadataKey from grpc.aio._typing import MetadataValue from grpc.aio._typing import MetadatumType from grpc.experimental import aio from tests.unit.framework.common import test_constants ADHOC_METHOD = '/test/AdHoc' def seen_metadata(expected: Metadata, actual: Metadata): return not bool(set(tuple(expected)) - set(tuple(actual))) def seen_metadatum(expected_key: MetadataKey, expected_value: MetadataValue, actual: Metadata) -> bool: obtained = actual[expected_key] return obtained == expected_value async def block_until_certain_state(channel: aio.Channel, expected_state: grpc.ChannelConnectivity): state = channel.get_state() while state != expected_state: await channel.wait_for_state_change(state) state = channel.get_state() def inject_callbacks(call: aio.Call): first_callback_ran = asyncio.Event() def first_callback(call): # Validate that all resopnses have been received # and the call is an end state. assert call.done() first_callback_ran.set() second_callback_ran = asyncio.Event() def second_callback(call): # Validate that all responses have been received # and the call is an end state. assert call.done() second_callback_ran.set() call.add_done_callback(first_callback) call.add_done_callback(second_callback) async def validation(): await asyncio.wait_for( asyncio.gather(first_callback_ran.wait(), second_callback_ran.wait()), test_constants.SHORT_TIMEOUT) return validation() class CountingRequestIterator: def __init__(self, request_iterator): self.request_cnt = 0 self._request_iterator = request_iterator async def _forward_requests(self): async for request in self._request_iterator: self.request_cnt += 1 yield request def __aiter__(self): return self._forward_requests() class CountingResponseIterator: def __init__(self, response_iterator): self.response_cnt = 0 self._response_iterator = response_iterator async def _forward_responses(self): async for response in self._response_iterator: self.response_cnt += 1 yield response def __aiter__(self): return self._forward_responses() class AdhocGenericHandler(grpc.GenericRpcHandler): """A generic handler to plugin testing server methods on the fly.""" _handler: grpc.RpcMethodHandler def __init__(self): self._handler = None def set_adhoc_handler(self, handler: grpc.RpcMethodHandler): self._handler = handler def service(self, handler_call_details): if handler_call_details.method == ADHOC_METHOD: return self._handler else: return None
vmturbo/nova
nova/virt/hyperv/vmops.py
Python
apache-2.0
48,910
0
# Copyright (c) 2010 Cloud.com, Inc # Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for basic VM operations. """ import contextlib import functools import os import time from eventlet import timeout as etimeout from os_win import constants as os_win_const from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import units from oslo_utils import uuidutils from nova.api.metadata import base as instance_metadata from nova.compute import vm_states import nova.conf from nova import exception from nova.i18n import _, _LI, _LE, _LW from nova import objects from nova.objects import fields from nova import utils from nova.virt import configdrive from nova.virt import hardware from nova.virt.hyperv import block_device_manager from nova.virt.hyperv import constants from nova.virt.hyperv import imagecache from nova.virt.hyperv import pathutils from nova.virt.hyperv import serialconsoleops from nova.virt.hyperv import vif as vif_utils from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) CONF = nova.conf.CONF SHUTDOWN_TIME_INCREMENT = 5 REBOOT_TYPE_SOFT = 'SOFT' REBOOT_TYPE_HARD = 'HARD' VM_GENERATIONS = { constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2 } VM_GENERATIONS_CONTROLLER_TYPES = { constants.VM_GEN_1: constants.CTRL_TYPE_IDE, constants.VM_GEN_2: constants.CTRL_TYPE_SCS
I } def check_admin_permissions(function): @functools.wraps(function) def wrapper(self, *args, **kwds): # Make sure the windows account has the required admin permissions. self._vmutils.check_admin_permissions() return function(self, *args, **kwds) return wrapper class VMOps(object): # The console log is stored in two files, each should have at most half of
# the maximum console log size. _MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2 _ROOT_DISK_CTRL_ADDR = 0 def __init__(self, virtapi=None): self._virtapi = virtapi self._vmutils = utilsfactory.get_vmutils() self._metricsutils = utilsfactory.get_metricsutils() self._vhdutils = utilsfactory.get_vhdutils() self._hostutils = utilsfactory.get_hostutils() self._pathutils = pathutils.PathUtils() self._volumeops = volumeops.VolumeOps() self._imagecache = imagecache.ImageCache() self._serial_console_ops = serialconsoleops.SerialConsoleOps() self._block_dev_man = ( block_device_manager.BlockDeviceInfoManager()) self._vif_driver = vif_utils.HyperVVIFDriver() def list_instance_uuids(self): instance_uuids = [] for (instance_name, notes) in self._vmutils.list_instance_notes(): if notes and uuidutils.is_uuid_like(notes[0]): instance_uuids.append(str(notes[0])) else: LOG.debug("Notes not found or not resembling a GUID for " "instance: %s", instance_name) return instance_uuids def list_instances(self): return self._vmutils.list_instances() def estimate_instance_overhead(self, instance_info): # NOTE(claudiub): When an instance starts, Hyper-V creates a VM memory # file on the local disk. The file size is the same as the VM's amount # of memory. Since disk_gb must be an integer, and memory is MB, round # up from X512 MB. return {'memory_mb': 0, 'disk_gb': (instance_info['memory_mb'] + 512) // units.Ki} def get_info(self, instance): """Get information about the VM.""" LOG.debug("get_info called for instance", instance=instance) instance_name = instance.name if not self._vmutils.vm_exists(instance_name): raise exception.InstanceNotFound(instance_id=instance.uuid) info = self._vmutils.get_vm_summary_info(instance_name) state = constants.HYPERV_POWER_STATE[info['EnabledState']] return hardware.InstanceInfo(state=state, max_mem_kb=info['MemoryUsage'], mem_kb=info['MemoryUsage'], num_cpu=info['NumberOfProcessors'], cpu_time_ns=info['UpTime']) def _create_root_device(self, context, instance, root_disk_info, vm_gen): path = None if root_disk_info['type'] == constants.DISK: path = self._create_root_vhd(context, instance) self.check_vm_image_type(instance.uuid, vm_gen, path) root_disk_info['path'] = path def _create_root_vhd(self, context, instance, rescue_image_id=None): is_rescue_vhd = rescue_image_id is not None base_vhd_path = self._imagecache.get_cached_image(context, instance, rescue_image_id) base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path) base_vhd_size = base_vhd_info['VirtualSize'] format_ext = base_vhd_path.split('.')[-1] root_vhd_path = self._pathutils.get_root_vhd_path(instance.name, format_ext, is_rescue_vhd) root_vhd_size = instance.flavor.root_gb * units.Gi try: if CONF.use_cow_images: LOG.debug("Creating differencing VHD. Parent: " "%(base_vhd_path)s, Target: %(root_vhd_path)s", {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._vhdutils.create_differencing_vhd(root_vhd_path, base_vhd_path) vhd_type = self._vhdutils.get_vhd_format(base_vhd_path) if vhd_type == constants.DISK_FORMAT_VHD: # The base image has already been resized. As differencing # vhdx images support it, the root image will be resized # instead if needed. 
return root_vhd_path else: LOG.debug("Copying VHD image %(base_vhd_path)s to target: " "%(root_vhd_path)s", {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._pathutils.copyfile(base_vhd_path, root_vhd_path) root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( base_vhd_path, root_vhd_size)) if not is_rescue_vhd and self._is_resize_needed( root_vhd_path, base_vhd_size, root_vhd_internal_size, instance): self._vhdutils.resize_vhd(root_vhd_path, root_vhd_internal_size, is_file_max_size=False) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(root_vhd_path): self._pathutils.remove(root_vhd_path) return root_vhd_path def _is_resize_needed(self, vhd_path, old_size, new_size, instance):
VUIIS/dax
dax/cluster.py
Python
mit
11,384
0
#!/usr/bin/env python # -*- coding: utf-8 -*- """ cluster.py Cluster functionality """ import os import time import logging import subprocess as sb from datetime import datetime from .dax_settings import DAX_Settings from .errors import ClusterError __copyright__ = 'Copyright 2013 Vanderbilt University. All Rights Reserved' DAX_SETTINGS = DAX_Settings() MAX_TRACE_DAYS = 30 LOGGER = logging.getLogger('dax') def c_output(output): """ Check if the output value is an integer :param output: variable to check :return: True if output is not an integer, False otherwise. """ try: int(output) error = False except ValueError as err: error = True LOGGER.error(err) return error def count_jobs(): """ Count the number of jobs in the queue on the cluster :return: number of jobs in the queue """ if command_found(cmd=DAX_SETTINGS.get_cmd_submit()): cmd = DAX_SETTINGS.get_cmd_count_nb_jobs() output = sb.check_output(cmd, shell=True) error = c_output(output) while error: LOGGER.info(' try again to access number of jobs in 2 seconds.') time.sleep(2) output = sb.check_output(cmd, shell=True) error = c_output(output) if int(output) < 0: return 0 else: return int(output) else: LOGGER.info(' Running locally. No queue with jobs.') return 0 def job_status(jobid): """ Get the status for a job on the cluster :param jobid: job id to check :return: job status """ cmd = DAX_SETTINGS.get_cmd_get_job_status()\ .safe_substitute({'jobid': jobid}) LOGGER.debug(str(cmd).strip()) try: output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True) LOGGER.debug('output='+str(output)) output = output.decode().strip() if output == DAX_SETTINGS.get_running_status(): return 'R' elif output == DAX_SETTINGS.get_queue_status(): return 'Q' elif output == DAX_SETTINGS.get_complete_status() or len(output) == 0: return 'C' else: return None except sb.CalledProcessError as e: LOGGER.debug(str(e)) return None def is_traceable_date(jobdate): """ Check if the job is traceable on the cluster :param jobdate: launching date of the job :return: True if traceable, False otherwise. 
""" try: trace_date = datetime.strptime(jobdate, "%Y-%m-%d") diff_days = (datetime.today() - trace_date).days return diff_days <= MAX_TRACE_DAYS except ValueError: return False def tracejob_info(jobid, jobdate): """ Trace the job information from the cluster :param jobid: job id to check :param jobdate: launching date of the job :return: dictionary object with 'mem_used', 'walltime_used', 'jobnode' """ time_s = datetime.strptime(jobdate, "%Y-%m-%d") diff_days = (datetime.today() - time_s).days + 1 jobinfo = dict() jobinfo['mem_used'] = get_job_mem_used(jobid, diff_days) jobinfo['walltime_used'] = get_job_walltime_used(jobid, diff_days) jobinfo['jobnode'] = get_job_node(jobid, diff_days) return jobinfo def get_job_mem_used(jobid, diff_days): """ Get the memory used for the task from cluster :param jobid: job id to check :param diff_days: difference of days between starting date and now :return: string with the memory usage, empty string if error """ mem = '' # Check for blank jobid if not jobid: return mem cmd = DAX_SETTINGS.get_cmd_get_job_memory()\ .safe_substitute({'numberofdays': diff_days, 'jobid': jobid}) try: output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True) if output.startswith(b'sacct: error'): raise ClusterError(output) if output: mem = output.strip() mem = mem.decode() except (sb.CalledProcessError, ClusterError): pass return mem def get_job_walltime_used(jobid, diff_days): """ Get the walltime used for the task from cluster :param jobid: job id to check :param diff_days: difference of days between starting date and now :return: string with the walltime used, empty string if error """ walltime = '' # Check for blank jobid if not jobid: return walltime cmd = DAX_SETTINGS.get_cmd_get_job_walltime()\ .safe_substitute({'numberofdays': diff_days, 'jobid': jobid}) try: output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True) if output: walltime = output.strip() walltime = walltime.decode() except sb.CalledProcessError: pass if not walltime and diff_days > 3: walltime = 'NotFound' return walltime def get_job_node(jobid, diff_days): """ Get the node where the job was running on the cluster :param jobid: job id to check :param diff_days: difference of days between starting date and now :return: string with the node, empty string if error """ jobnode = '' # Check for blank jobid if not jobid: return jobnode if jobid == 'no_qsub': cmd = 'uname -a' output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True) if output and len(output.strip().split(' ')) > 1: jobnode = output.strip().split(' ')[1] return jobnode cmd = DAX_SETTINGS.get_cmd_get_job_node()\ .safe_substitute({'numberofdays': diff_days, 'jobid': jobid}) try: output = sb.check_output(cmd, stderr=sb.STDOUT, shell=True) if output: jobnode = output.strip() jobnode = jobnode.decode() except sb.CalledProcessError: pass return jobnode def get_specific_str(big_str, prefix, suffix): """ Extract a specific length out of a string :param big_str: string to reduce :param prefix: prefix to remove :param suffix: suffix to remove :return: string reduced, return empty string if prefix/suffix not present """ specific_str = big_str if prefix and len(specific_str.split(prefix)) > 1: specific_str = specific_str.split(prefix)[1] if suffix and len(specific_str.split(suffix)) > 1: specific_str = specific_str.split(suffix)[0] if specific_str != big_str: return specific_str else: return '' def command_found(cmd='qsub'): """ Return True if the command was found.""" if True in [os.path.isfile(os.path.join(path, cmd)) and os.ac
cess(os.path.join(path, cmd), os.X_OK) for path in os.environ["PATH"].split(os.pathsep)]: return True return False class PBS(object): # The s
cript file generator class """ PBS class to generate/submit the cluster file to run a task """ def __init__(self, filename, outfile, cmds, walltime_str, mem_mb=2048, ppn=1, env=None, email=None, email_options=DAX_SETTINGS.get_email_opts(), rungroup=None, xnat_host=None, job_template=None): """ Entry point for the PBS class :param filename: filename for the script :param outfile: filepath for the outlogs :param cmds: commands to run in the script :param walltime_str: walltime to set for the script :param mem_mb: memory in mb to set for the script :param ppn: number of processor to set for the script :param env: Environment file to source for the script :param email: email address to set for the script :param email_options: email options to set for the script :param rungroup: group to run job under on the cluster :param xnat_host: set the XNAT_HOST for the job (export) :return: None """ self.filename = filename self.outfile = outfile self.cm
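For reference, the get_specific_str helper above trims command output to the text between a prefix and a suffix via str.split and returns an empty string when nothing was trimmed. A self-contained sketch of the same idea follows; the sample string is hypothetical and not real sacct output.

def between(big_str, prefix, suffix):
    """Return the text between prefix and suffix, or '' if nothing was trimmed."""
    out = big_str
    if prefix and len(out.split(prefix)) > 1:
        out = out.split(prefix)[1]
    if suffix and len(out.split(suffix)) > 1:
        out = out.split(suffix)[0]
    return out if out != big_str else ''

print(between("JobId=1234 Mem=2048K Node=vm-01", "Mem=", " Node="))  # prints: 2048K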
chhsiao90/cheat-ext
cheat_ext/main.py
Python
mit
1,279
0
import argparse from cheat_ext.info import info, ls from cheat_ext.installer import ( install, upgrade, remove ) from cheat_ext.linker import link, unlink def _install(args): install(args.repository) link(args.repository) def _upgrade(args): upgrade(args.repository) link(args.repository) d
ef _remove(args): unlink(args.repository) remove(args.repository) def _info(args): info(args.repository) def _ls(args
): ls() parser = argparse.ArgumentParser(description="cheat extension") subparsers = parser.add_subparsers() install_parser = subparsers.add_parser("install") install_parser.add_argument("repository", type=str) install_parser.set_defaults(func=_install) upgrade_parser = subparsers.add_parser("upgrade") upgrade_parser.add_argument("repository", type=str) upgrade_parser.set_defaults(func=_upgrade) remove_parser = subparsers.add_parser("remove") remove_parser.add_argument("repository", type=str) remove_parser.set_defaults(func=_remove) info_parser = subparsers.add_parser("info") info_parser.add_argument("repository", type=str) info_parser.set_defaults(func=_info) ls_parser = subparsers.add_parser("ls") ls_parser.set_defaults(func=_ls) def main(): options = parser.parse_args() options.func(options)
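The entry point above uses the common argparse sub-command dispatch pattern: every sub-parser registers its handler with set_defaults(func=...) and main() simply calls options.func(options). A minimal self-contained sketch of that pattern; the greet command is illustrative and not part of cheat-ext.

import argparse

def greet(args):
    print("hello, {}".format(args.name))

parser = argparse.ArgumentParser(description="sub-command dispatch demo")
subparsers = parser.add_subparsers(dest="command")
greet_parser = subparsers.add_parser("greet")
greet_parser.add_argument("name", type=str)
greet_parser.set_defaults(func=greet)

def main():
    options = parser.parse_args()
    # With no sub-command given, 'func' may be unset on Python 3, so fall back to help.
    if hasattr(options, "func"):
        options.func(options)
    else:
        parser.print_help()

if __name__ == "__main__":
    main()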
mkusz/invoke
sites/shared_conf.py
Python
bsd-2-clause
1,189
0
from datetime import datetime from os.path import abspath, join, dirname import alabaster # Alabaster theme + mini-extension html_theme_path = [alabaster.get_path()] extensions = ['alabaster', 'sphinx.ext.intersphinx', 'sphinx.ext.doctest'] # Paths relative to invoking conf.py - not this shared file html_theme = 'alabaster' html_theme_options = { 'description': "Pythonic task execution", 'github_user': 'pyinvoke', 'github_repo': 'invoke', 'analytics_id': 'UA-18486793-3', 'travis_button': True, 'codecov_button': True, } html_sidebars = { '**':
[ 'about.html', 'navigation.html', 'searchbox.html', 'donate.html', ] } # Everything intersphinx's to Python intersphinx_mapping = { 'python': ('https://docs.python.org/2.7/', None), } # Doctest settings doctest_path = [abspath(join(dirname(__file__), '..', 'tests'))] doctest_global_setup =
r""" from _util import MockSubprocess """ # Regular settings project = 'Invoke' year = datetime.now().year copyright = '{} Jeff Forcier'.format(year) master_doc = 'index' templates_path = ['_templates'] exclude_trees = ['_build'] source_suffix = '.rst' default_role = 'obj'
peter17/pijnu
samples/wikiLine.py
Python
gpl-3.0
2,097
0.007153
# -*- coding: utf8 -*- ''' Copyright 2009 Denis Derman <denis.spir@gmail.com> (former developer) Copyright 2011-2012 Peter Potrowl <peter017@gmail.com> (current developer) This file is part of Pijnu. Pijnu is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Pijnu is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Pijnu. If not, see <http://www.gnu.org/licenses/>. ''' """ wikiLine lineChar : [\x20..\x7e] rawChar : [\x20..\x7e !!/!_] DISTINCT : "//" : drop IMPORTANT : "!!" : drop MONOSPACE : "__" : drop rawText : rawChar+ : join distinctText : DISTINCT inline DISTINCT : liftValue importantText : IMPORTANT inline IMPORTANT : liftValue monospaceText : MONOSPACE inline MONOSPACE : liftValue styledText : distinctText / importantText / monospaceText text : styledText / rawText inline : @ text+ """ from pijnu import * # title: wikiLine inline = Recursion() lineChar = Klass(' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~') rawChar = Klass(' "#$%&\'()*+,-.0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^`abcdefghijklmnopqrstuvwxyz{|}~ ') DISTINCT = Word('//')(drop) IMPORTAN
T = Word('!!')(drop) MONOSPACE = Word('__')(drop) rawText = OneOrMore(rawChar)(join) distinctText = Sequence(DISTINCT, inline, DISTINCT)(liftValue) importantText = Sequence(IMPORTANT, inline, IMPORTANT)(liftValue) monospaceText = Sequence(MONOSPACE, inline, MONOSPACE)(liftValue) styledText = Choice(distinctText, importantText,
monospaceText) text = Choice(styledText, rawText) inline **= OneOrMore(text) parser = Parser('wikiLine', locals(), 'inline')
Azure/azure-sdk-for-python
sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/operations/_backup_policies_operations.py
Python
mit
31,820
0.004431
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from msrest import Serializer from .. import models as _models from .._vendor import _convert_request, _format_url_section T = TypeVar('T') JSONType = Any ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_list_request( subscription_id: str, resource_group_name: str, account_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-08-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), "accountName": _SERIALIZER.url("account_name", account_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_request( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-08-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), "accountName": _SERIALIZER.url("account_name", account_name, 'str'), "backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters 
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_create_request_initial( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2021-08-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), "accountName": _SERIALIZER.url("account_name", account_name, 'str'), "backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) def build_update_request_initial( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2021-08-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/backupPolicies/{backupPolicyName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_
group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), "accountName": _SERIALIZER.url("account_name", account_name, 'str'), "backupPolicyName": _SERIALIZER.url("backup_policy_name", backup_policy_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {})
# type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PATCH", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) def build_delete_request_initial( subscription_id: str, resource_group_name: str, account_name: str, backup_policy_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-08-01" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{
leliel12/handy
handy/logger.py
Python
bsd-3-clause
733
0.006821
# -*- coding: utf-8 -*- import logging, logging.handlers from django.c
onf import settings def get_logger(name, level=logging.INFO, format='[%(asctime)s
] %(message)s', handler=None, filename=None): new_logger = logging.getLogger(name) new_logger.setLevel(level) if not handler: filename = filename or '%s/logs/%s.log' % (settings.HOME_DIR, name) handler = logging.FileHandler(filename) handler.setFormatter(logging.Formatter(format)) new_logger.addHandler(handler) return new_logger if hasattr(settings, 'LOG_FILENAME') and not logger: handler = logging.handlers.TimedRotatingFileHandler(settings.LOG_FILENAME, when = 'midnight') logger = get_logger('default', handler=handler)
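get_logger above is a thin wrapper over the standard logging module: resolve a handler (a FileHandler built from Django settings by default), attach a formatter, and return the named logger. The same wiring with only the standard library, shown here so the pattern is runnable without Django; the logger name and file path are illustrative.

import logging

def make_logger(name, filename, level=logging.INFO,
                fmt='[%(asctime)s] %(message)s'):
    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.FileHandler(filename)
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    return logger

log = make_logger('jobs', '/tmp/jobs.log')
log.info('job started')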
jianjunz/online-judge-solutions
leetcode/0750-contain-virus.py
Python
mit
5,673
0.000705
class Solution: def containVirus(self, grid: List[List[int]]) -> int: current_set_number = 1 grid_set = [[0 for i in range(len(grid[0]))] for j in range(len(grid))] set_grid = {} threaten = {} def getAdjacentCellsSet(row, col) -> List[int]: answer = [] if row != 0 and grid_set[row-1][col] != 0 and grid_set[row-1][col] not in answer: answer.append(grid_set[row-1][col]) if col != 0 and grid_set[row][col-1] != 0 and grid_set[row][col-1] not in answer: answer.append(grid_set[row][col-1]) if row != len(grid)-1 and grid_set[row+1][col] != 0 and grid_set[row+1][col] not in answer: answer.append(grid_set[row+1][col]) if col != len(grid[0])-1 and grid_set[row][col+1] != 0 and grid_set[row][col+1] not in answer: answer.append(grid_set[row][col+1]) if -1 in answer: answer.remove(-1) if grid_set[row][col] in answer: answer.remove(grid_set[row][col]) return answer # Merge all regions to the first one. def merge(regions: List[int]): merge_to = regions[0] for i in range(1, len(regions)): for x, y in set_grid[regions[i]]: grid_set[x][y] = merge_to set_grid[merge_to] += set_grid[regions[i]] del set_grid[regions[i]] if regions[i] in threaten: del threaten[regions[i]] for i in range(len(grid)): for j in range(len(grid[0])): if grid[i][j] == 1: adjacent_sets = getAdjacentCellsSet(i, j) set_number = 0 if len(adjacent_sets) == 0: set_number = current_set_number current_set_number += 1 elif len(adjacent_sets) == 1: set_number = adjacent_sets[0] else: # Merge merge(adjacent_sets) set_number = adjacent_sets[0] grid_set[i][j] = set_number if set_number not in set_grid: set_grid[set_number] = [] set_grid[set_number].append((i, j)) def adjacentThreatened(x, y): answer = [] if x != 0 and grid_set[x-1][y] == 0: answer.append((x-1, y)) if y != 0 and grid_set[x][y-1] == 0: answer.append((x, y-1)) if x != len(grid_set)-1 and grid_set[x+1][y] == 0: answer.append((x+1, y)) if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0: answer.append((x, y+1)) return answer def threatenCells(): for i in set_grid: if i == 0 or i == -1: continue threatened = set() for x, y in set_grid[i]: threatened = threatened.union(adjacentThreatened(x, y)) threaten[i] = len(threatened) def contain(set_number): wall = 0 for x, y in set_grid[set_number]: grid_set[x][y] = -1 if x != 0 and grid_set[x-1][y] == 0: wall += 1 if y != 0 and grid_set[x][y-1] == 0: wall += 1 if x != len(grid_set)-1 and grid_set[x+1][y] == 0: wall += 1 if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0: wall += 1 del set_grid[set_number] del threaten[set_number] return wall def spread(): to_spread = deque() for _, v in set_grid.items(): to_spread.extend(v) while len(to_spread) > 0: x, y = to_spread.popleft() current_set = grid_set[x][y] if x != 0 and grid_set[x-1][y] == 0: grid_set[x-1][y] = current_set set_grid[current_set].append((x-1, y)) adj = getAdjacentCellsSet(x-1, y) merge([current_set]+adj) if y != 0 and grid_set[x][y-1] == 0: grid_set[x][y-1] = current_set set_grid[current_set].append((x, y-1)) adj = getAdjacentCellsSet(x, y-1) merge([current_set]+adj) if x != len(grid_set)-1 and grid_set[x+1][y] == 0: grid_set[x+1][y] = current_set set_grid[current_set].append((x+1, y)) adj = getAdjacentCellsSet(x+1, y) merge([current_set]+adj) if y != len(grid_set[0])-1 and grid_set[x][y+1] == 0: grid_set[x][y+1] = current_set set_grid[current_set].append((x, y+1)) adj = getAdjacentCellsSet(x, y+1) merge([current_set]+adj) answer = 0 threatenCells() # print(
grid_set) # print(answer) while len(threaten) != 0: # print(threaten) largest_infected = sorted(
threaten.items(), key=lambda x: x[1], reverse=True)[0] answer += contain(largest_infected[0]) spread() # print(grid_set) # print(answer) threatenCells() return answer
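Run outside the LeetCode judge, the class above additionally needs 'from typing import List' and 'from collections import deque', which the judge normally provides. With those imports in place, the first sample grid from the problem statement exercises it end to end; the expected answer in the comment is the one quoted by that problem statement.

grid = [[0, 1, 0, 0, 0, 0, 0, 1],
        [0, 1, 0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 0, 0, 0]]
print(Solution().containVirus(grid))  # problem statement's expected output: 10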
BV-DR/foamBazar
pythonScripts/gmshScript/line.py
Python
gpl-3.0
2,331
0.014586
import copy from .point import Point from .misc import * ''' Line is defined using two point(s). ''' class Line(object): _ID_NAME = '_LINE_ID' _DB_NAME = '_EXISTING_LINES' def __init__(self, geom, p0, p1): def check(p): if geom is None: return p if isinstance(p, Point): found,pid = exist(geom,p) if found: return pid else: if geom.get(Point,p) is not None: return p return None assert isinstance(p0, (Point, int, long)) assert isinstance(p1, (Point, int, long)) self.pid = [check(p0), check(p1)] if self.pid[0] is None: raise RuntimeError("Line: Point p0 does not exist in geo-file") if self.pid[1] is None: raise RuntimeError("Line: Point p1 does not exist in geo-file") if self.pid[0] == self.pid[1]: raise RuntimeError("Line: Cannot construct lines of zero length") return # for printing to terminal def __repr__(self): return "l("+remove_bracket(str(self.dataFromKey(self.key())))+")" def code(self, geom): ''' Return the code for use in the geo-file ''' # we do not allow the same line to be added twice # self.exist(...) should return a (new) idx if not found found,idx = exist(geom,self) if found: return '' return '\n'.join([('Line(%d) = {%d,%d};') % (idx,self.pid[0], self.pid[1])]) # NOTE: for uniqueness the sorted idx is used as "key" in the database def key(self, master=False): keystr=remove_bracket(str(sorted(map(abs,self.pid)) + self.pid)) if master: return remove_bracket(str(sorted(map(abs,self.pid)))) return keystr # this is an alternative constructor which can be called directly as "
Line.fromkey(keystr)" @classmethod def fro
mkey(cls, keystr): pid=cls.dataFromKey(keystr) return Line(None, pid[0], pid[1]) @classmethod def masterDBKeys(cls, geom): subkeys=copy.deepcopy(getDB(geom,cls).keys()) for i in range(0,len(subkeys)): tmp=subkeys[i].split(',') subkeys[i]=",".join(tmp[:len(tmp)/2]) return subkeys @staticmethod def dataFromKey(keystr): return [int(i) for i in keystr.split(',')][2:]
infochimps-forks/ezbake-common-python
support/django/setup.py
Python
apache-2.0
1,200
0.000833
#!/usr/bin/env python2 # Copyright (C) 2013-2014 Computer Sciences Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import setup, find_packages setup( name='ezbake-support-django', vers
ion='2.1', description='Supporting library for integrating Django applications with EzBake.', license='Apache License 2.0', author='EzBake Developers', author_email='developers@ezbake.io', namespace_packages=['ezbake', 'ezbake.support'], packages=find_packages('lib', exclude=['test*']), package_dir={'': 'lib'}, install_requires=[ 'ezbake-security-client==2.1', 'Django>=1.4', 'psycopg2>=2.5', ], )
prodicus/dabble
python/decorators/decorate_with_tags.py
Python
mit
381
0.010499
#!/usr/bin/env python # encoding: utf-8 """An example for a funct
ion returning a function""" def surround(tag1, tag2): def wraps(content): return '{}{}{}'.format(tag1, content, tag2) return wraps def printer(content, transform): return transform(content) print printer("foo bar", surround("<a>", "</a>")) print printer("foo bar", surround('<p>', '
</p>'))
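The snippet above targets Python 2 (print statements). The same closure-returning-a-function example rendered for Python 3, functionally identical and purely illustrative:

def surround(tag1, tag2):
    def wraps(content):
        return '{}{}{}'.format(tag1, content, tag2)
    return wraps

def printer(content, transform):
    return transform(content)

print(printer("foo bar", surround("<a>", "</a>")))  # <a>foo bar</a>
print(printer("foo bar", surround("<p>", "</p>")))  # <p>foo bar</p>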
vicky2135/lucious
oscar/lib/python2.7/site-packages/flake8/main/debug.py
Python
bsd-3-clause
2,017
0
"""Module containing the logic for our debugging logic.""" from __future__ import print_function import json import platform import setuptools def print_information(option, option_string, value, parser, option_manager=None): """Print debugging information used in bug reports. :param option: The optparse Option instance. :type option: optparse.Option :param str option_string: The option name :param value: The value passed to the callback parsed from the command-line :param parser: The optparse OptionParser instance :type parser: optparse.OptionParser :param option_manager: The Flake8 OptionManager instance. :type option_manager: flake8.options.manager.OptionManager """ if not option_manager.registered_plugins: # NOTE(sigmavirus24): Flake8 parses options twice. The first time, we # will not have any registered plugins. We can skip this one and only # take action on the second time we're called. return print(json.dumps(information(option_manager), indent=2, sort_keys=True)) raise SystemExit(False) def information(option_manager): """Generate the information to be printed for the bug report.""" return { 'version': option_manager.version, 'plugins': p
lugins_from(option_manager), 'dependencies': dependencies(), 'platform': { 'python_implementation': platform.python_implementation(), 'python_version': p
latform.python_version(), 'system': platform.system(), }, } def plugins_from(option_manager): """Generate the list of plugins installed.""" return [{'plugin': plugin, 'version': version} for (plugin, version) in sorted(option_manager.registered_plugins)] def dependencies(): """Generate the list of dependencies we care about.""" return [{'dependency': 'setuptools', 'version': setuptools.__version__}]
PersonalGenomesOrg/open-humans
private_sharing/migrations/0001_squashed_0034_auto_20160727_2138.py
Python
mit
14,678
0.002861
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-08-01 23:15 import autoslug.fields import common.utils import datetime from django.conf import settings import django.contrib.postgres.fields from django.db import migrations, models import django.db.models.deletion from django.utils.timezone import utc import open_humans.storage import private_sharing.models class Migration(migrations.Migration): initial = True dependencies = [ ('open_humans', '0003_auto_20151223_1827'), ('oauth2_provider', '__first__'), ('open_humans', '0004_member_badges'), ] operations = [ migrations.CreateModel( name='DataRequestProject', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_study', models.BooleanField(choices=[(True, 'Study'), (False, 'Activity')], help_text='A "study" is doing human subjects research and must have Institutional Review Board approval or equivalent ethics board oversight. Activities can be anything else, e.g. data visualizations.', verbose_name='Is this project a study or an activity?')), ('name', models.CharField(max_length=100, verbose_name='Project name')), ('leader', models.CharField(max_length=100, verbose_name='Leader(s) or principal investigator(s)')), ('organization', models.CharField(max_length=100, verbose_name='Organization or institution')), ('contact_email', models.EmailField(max_length=254, verbose_name='Contact email for your project')), ('info_url', models.URLField(verbose_name='URL for general information about your project')), ('short_description', models.CharField(max_length=140, verbose_name='A short description')), ('long_description', models.TextField(max_length=1000, verbose_name='A long description')), ('active', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active,\nit won\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.')), ('badge_image', models.ImageField(blank=True, help_text="A badge that will be displayed on the user's profile once they've connected your project.", max_length=1024, storage=open_humans.storage.PublicStorage(), upload_to=private_sharing.models.badge_upload_path)), ('request_sources_access', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to")), ('request_message_permission', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text='Permission to send messages to the member. This does not grant access to their email address.', verbose_name='Are you requesting permission to message users?')), ('request_username_access', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text="Access to the member's username. This implicitly enables access to anything the user is publicly sharing on Open Humans. 
Note that this is potentially sensitive and/or identifying.", verbose_name='Are you requesting Open Humans usernames?')), ('approved', models.BooleanField(default=False)), ('created', models.DateTimeField(auto_now_add=True)), ('last_updated', models.DateTimeField(auto_now=True)), ('api_access_secret', models.CharField(max_length=64)), ], options={ 'verbose_name_plural': 'Data request activities', }, ), migrations.CreateModel( name='DataRequestProjectMember', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user_id_code', models.CharField(max_length=16)), ('message_permission', models.BooleanField()), ('username_shared', models.BooleanField()), ('sources_shared', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None)), ('member', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member')), ], ), migrations.CreateModel( name='OAuth2DataRequestProject', fields=[ ('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')), ('enrollment_url', models.URLField(help_text="The URL we direct members to if they're interested in sharing data with your project.", verbose_name='Enrollment URL')), ('redirect_url', models.CharField(help_text='The return URL for our "authorization code" OAuth2 grant\n process. You can <a target="_blank" href="">read more about OAuth2\n "authorization code" transactions here</a>.', max_length=256, verbose_name='Redirect URL')), ('application', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)), ], options={ 'verbose_name': 'OAuth2 data request project', }, bases=('private_sharing.datarequestproject',
),
), migrations.CreateModel( name='OnSiteDataRequestProject', fields=[ ('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')), ('consent_text', models.TextField(help_text='The "informed consent" text that describes your project to Open Humans members.')), ('post_sharing_url', models.URLField(blank=True, help_text='If provided, after authorizing sharing the\nmember will be taken to this URL. If this URL includes "PROJECT_MEMBER_ID"\nwithin it, we will replace that with the member\'s project-specific\nproject_member_id. This allows you to direct them to an external survey you\noperate (e.g. using Google Forms) where a pre-filled project_member_id field\nallows you to connect those responses to corresponding data in Open Humans.', verbose_name='Post-sharing URL')), ], options={ 'verbose_name': 'On-site data request project', }, bases=('private_sharing.datarequestproject',), ), migrations.AddField( model_name='datarequestprojectmember', name='project', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_members', to='private_sharing.DataRequestProject'), ), migrations.AddField( model_name='datarequestproject', name='coordinator', field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='open_humans.Member'), ), migrations.AlterField( model_name='datarequestproject', name='long_description', field=models.TextField(max_length=1000, verbose_name='A long description (1000 characters max)'), ), migrations.AlterField( model_name='datarequestproject', name='short_description', field=models.CharField(max_length=140, verbose_name='A short description (140 characters max)'), ), migrations.AlterField( model_name='datarequestprojectmember', name='member', field=models.ForeignKey(on_delete=dja
shonenada/crawler
setup.py
Python
mit
359
0
from setuptools import setup
, find_packages setup( name="simple-crawler", version="0.1", url="https://github.com/shonenada/crawler", author="shonenada", author_email="shonenada@gmail.com", description="Simple crawler", zip_safe=True, platforms="any", packages=find_packages(),
install_requires=["requests==2.2.1"], )
denys-duchier/Scolar
notes_users.py
Python
gpl-2.0
1,115
0
# -*- mode: python -*- # -*- coding: iso8859-15 -*- ############################################################################## # # Gestion scolarite IUT # # Copyright (c) 2001 - 2006 Emmanuel Viennet. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License a
s published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Pub
lic License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Emmanuel Viennet emmanuel.viennet@viennet.net # ############################################################################## """Basic User management """ ???????????????
xingjiepan/ss_generator
ss_generator/ca_tracing/alpha_helix.py
Python
bsd-3-clause
11,909
0.005458
import numpy as np from ss_generator import geometry from . import basic D_MEAN = 3.81 D_STD = 0.02 THETA_MEAN = np.radians(91.8) THETA_STD = np.radians(3.35) TAU_MEAN = np.radians(49.5) TAU_STD = np.radians(7.1) def theta_tau_to_rotation_matrix(theta, tau): '''Get the rotation matrix corresponding to the bond angle theta and dihedral tau. ''' return np.dot(geometry.rotation_matrix_from_axis_and_angle(np.array([0, 1, 0]), tau), geometry.rotation_matrix_from_axis_and_angle(np.array([0, 0, 1]), theta - np.pi)) def axis_to_theta_tau(axis): '''Get the bond angle theta and dihedral tau, from a rotation axis. ''' theta = 2 * np.arctan(-axis[1] / axis[0]) tau = 2 * np.arctan(axis[0] / axis[2]) return theta, tau def check_theta_tau(theta, tau): '''Check that if the theta and tau are within the 3 * STD respectively. ''' if theta > THETA_MEAN + 3 * THETA_STD or theta < THETA_MEAN - 3 * THETA_STD: return False if tau > TAU_MEAN + 3 * TAU_STD or tau < TAU_MEAN - 3 * TAU_STD: return False return True def theta_tau_for_nexus(axis, axis_new): '''Given an axis, find a pair of (theta, tau) such that after rotating the coordinate frame by M(theta, tau), the coordinates of the axis in the new frame is axis_new. ''' vx1 = axis[0] vy1 = axis[1] vz1 = axis[2] vx2 = axis_new[0] vy2 = axis_new[1] vz2 = axis_new[2] # Calculate the tau angle t = 1 / (vz2 + vz1) * (vx1 + np.sign(vx1) * np.sqrt(vx1 ** 2 - (vz2 ** 2 - vz1 ** 2))) tau = 2 * np.arctan(t) # Calculate the theta angle s = np.sin(tau) c = np.cos(tau) q = 1 / (vx2 + s * vz1 - c * vx1) * (-vy1 \ - np.sign(vy1) * np.sqrt(vy1 ** 2 - (vx2 ** 2 - (s * vz1 - c * vx1) ** 2))) theta = 2 * np.arctan(q) return theta, tau ### Functions to generate a new helix def generate_alpha_helix_from_screw_axes(screw_axes, relieve_strain=False, M_init=None): '''Generate an alpha helix from a list of screw axes. Return a list of Ca coordinates. ''' thetas, taus, M_rot = get_theta_tau_and_rotation_matrix_from_screw_axes( screw_axes, relieve_strain=relieve_strain, M_init=M_init) ca_list = basic.generate_segment_from_internal_coordinates( [D_MEAN] * (len(screw_axes) + 2), thetas, taus) return [np.dot(M_rot, ca) for ca in ca_list] def get_theta_tau_and_rotation_matrix_from_screw_axes(screw_axes, relieve_strain=False, M_init=None): '''Get internal coordinates theta and tau from a list of screw axes. ''' # Get the rotation matrix from the default frame to the first local frame. # Note that there are infinite number of possible matrices to do so. 
if M_init is None: axis_default = geometry.rotation_matrix_to_axis_and_angle( theta_tau_to_rotation_matrix(THETA_MEAN, TAU_MEAN))[0] M_init = geometry.rotation_matrix_to_superimpose_two_vectors( axis_default, screw_axes[0], theta=np.random.uniform(-np.pi, np.pi)) # Get the internal coordinates thetas = [THETA_MEAN] * 2 taus = [TAU_MEAN] M_rot = np.dot(M_init, theta_tau_to_rotation_matrix(THETA_MEAN, TAU_MEAN)) for i in range(1, len(screw_axes)): local_axis = np.dot(np.transpose(M_rot), screw_axes[i]) theta, tau = axis_to_theta_tau(local_axis) # Relieve the strain if relieve_strain and i % 7 == 0 and i + 1 < len(screw_axes): next_axis = np.dot(np.transpose(M_rot), screw_axes[i + 1]) ideal_axis = geometry.rotation_matrix_to_axis_and_angle( theta_tau_to_rotation_matrix(THETA_MEAN, TAU_MEAN))[0] theta, tau = theta_tau_for_nexus(next_axis, ideal_axis) if not check_theta_tau(theta, tau): raise Exception("The value of theta or tau beyond the limits.") M_local = theta_tau_to_rotation_matrix(theta, tau) M_rot = np.dot(M_rot, M_local) thetas.append(theta) taus.append(tau) return thetas, taus, M_init def generate_super_coil(axis, omega, pitch_angle, length): '''Generate a alpha helix super coil. Return a list of Ca coordinates. ''' axis = geometry.normalize(axis) M_rot = geometry.rotation_matrix_from_axis_and_angle(axis, omega) # Get the screw axes axis_perpendicular = None if np.abs(axis[0]) > 0.01: axis_perpendicular = geometry.normalize( np.array([axis[1], -axis[0], 0])) else: axis_perpendicular = geometry.normalize( np.array([0, axis[2], -axis[1]])) screw_seed = np.dot(geometry.rotation_matrix_from_axis_and_angle( axis_perpendicular, pitch_angle), axis) screw_axes = [screw_seed] for i in range(1, length): screw_axes.append(np.dot(M_rot, screw_axes[i - 1])) # Generate the helix return generate_alpha_helix_from_screw_axes(screw_axes, relieve_strain=True) ### Functions to perturb an existing helix def randomize_a_helix(ca_list, ratio): '''Randomize internal coordinates of a helix. Only int(ratio * len(ca_list)) residues are perturbed. ''' ds, thetas, taus = basic.get_internal_coordinates_from_ca_list(ca_list) num_to_perturb = int(ratio * len(ca_list)) res_to_perturb = np.random.permutation(len(ca_list) - 3)[:num_to_perturb] for i in res_to_perturb: theta = np.random.normal(THETA_MEAN, THETA_STD) tau = np.random.normal(TAU_MEAN, TAU_STD) if check_theta_tau(theta, tau): thetas[i] = theta taus[i] = tau perturbed_ca_list = basic.generate_segment_from_internal_coordinates(ds, thetas, taus) # Superimpose the perturbed ca list to the old list M, t = geometry.get_superimpose_transformation(perturbed_ca_list, ca_list) perturbed_ca_list = [ np.dot(M, p) + t for p in perturbed_ca_list] return perturbed_ca_list def shift_helix_phase(ca_list, phase_shift): '''Shift the phase of a helix without changing it's direction. ''' # Get the screw axes screw_axes = [] for i in range(1, len(ca_list) - 2): M1 = geometry.create_frame_from_three_points( ca_list[i - 1], ca_list[i], ca_list[i + 1]) M2 = geometry.create_frame_from_three_points( ca_list[i], ca_list[i + 1], ca_list[i + 2]) screw_axes.append(geometry.rotation_matrix_to_axis_and_angle( np.dot(np.transpose(M2), M1))[0]) # Get the initial rotation matrix for helix generation M1 = geometry.create_frame_from_three_points( ca_list[0], ca_lis
t[1], ca_list[2]) M_init = np.dot(geometry.rotation_matrix_from_axis_and_angle( scr
ew_axes[0], phase_shift), np.transpose(M1)) # Calculate the Ca coordinates shifted_ca_list = generate_alpha_helix_from_screw_axes(screw_axes, relieve_strain=True, M_init=M_init) t = np.mean(ca_list, axis=0) - np.mean(shifted_ca_list, axis=0) for i in range(len(shifted_ca_list)): shifted_ca_list[i] = shifted_ca_list[i] + t return shifted_ca_list def twist_helix(ca_list, axis, pitch_angle, omega, ratio): '''Twist a helix, making it closer to a super coil who is defined by axis, pitch_angle and omega. int(ratio * len(ca_list)) minimum twist units (each with 6 residues) are perturbed. ''' ds, thetas, taus = basic.get_internal_coordinates_from_ca_list(ca_list) M_init = geometry.create_frame_from_three_points( ca_list[0], ca_list[1], ca_list[2]) # Get residues to perturb num_to_perturb = int(ratio * len(ca_list)) res_to_perturb = sorted(np.random.permutation(len(taus) - 2)[:num_to_perturb]) # Get the perturbed internal coordinates for i in range(len(taus)): if i in res_to_perturb: new_thetas, new_taus = twist_minimum_unit(thetas[i + 1: i + 4], taus[i: i + 3], M_init, axis, pitch_angle, omega) if new_thetas is not None: for j in range(3): thetas[i + 1 + j] = new_theta
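theta_tau_to_rotation_matrix above composes a rotation by tau about the y axis with a rotation by (theta - pi) about the z axis. Assuming geometry.rotation_matrix_from_axis_and_angle is a standard axis-angle (Rodrigues) rotation, which the module's usage suggests but this excerpt does not show, a numpy-only sketch of the same construction is:

import numpy as np

def axis_angle(axis, angle):
    """Rodrigues rotation matrix for a unit axis and an angle in radians."""
    x, y, z = axis
    c, s = np.cos(angle), np.sin(angle)
    C = 1.0 - c
    return np.array([[c + x * x * C,     x * y * C - z * s, x * z * C + y * s],
                     [y * x * C + z * s, c + y * y * C,     y * z * C - x * s],
                     [z * x * C - y * s, z * y * C + x * s, c + z * z * C]])

def theta_tau_matrix(theta, tau):
    return np.dot(axis_angle(np.array([0.0, 1.0, 0.0]), tau),
                  axis_angle(np.array([0.0, 0.0, 1.0]), theta - np.pi))

# Ideal alpha-helix internal coordinates, matching the THETA_MEAN/TAU_MEAN constants above.
print(theta_tau_matrix(np.radians(91.8), np.radians(49.5)))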
wesolutki/voter
auth/models.py
Python
gpl-3.0
2,125
0.000471
from common.models import * from common.localization import txt, verbose_names @verbose_names class Patient(models.Model):
# private first_name = models.CharField(max_length=80) last_name = models.CharField(max_length=80) GENDER = ( (txt('M'), txt('male')), (txt
('F'), txt('female')) ) gender = models.CharField(max_length=1, choices=GENDER) BLOOD_TYPE = ( (txt('0Rh-'), txt('0Rh-')), (txt('0Rh+'), txt('0Rh+')), (txt('ARh-'), txt('ARh-')), (txt('ARh+'), txt('ARh+')), (txt('BRh-'), txt('BRh-')), (txt('BRh+'), txt('BRh+')), (txt('ABR-'), txt('ABRh-')), (txt('ABR+'), txt('ABRh+')), ) blood_type = models.CharField(max_length=4, choices=BLOOD_TYPE, blank=True, null=True) birth_date = models.DateField() pesel = PESELField() # address country = models.CharField(max_length=80, default="Polska") city = models.CharField(max_length=80) address = models.CharField(max_length=80, blank=True, null=True) # mailing_address mailing_country = models.CharField(max_length=80, blank=True, null=True) mailing_city = models.CharField(max_length=80, blank=True, null=True) mailing_address = models.CharField(max_length=80, blank=True, null=True) # work job = models.CharField(max_length=80, blank=True, null=True) workplace = models.CharField(max_length=80, blank=True, null=True) # contact cell_phone = models.CharField(max_length=80, blank=True, null=True) landline_phone = models.CharField(max_length=80, blank=True, null=True) email = models.EmailField(blank=True, null=True) # injury info date_of_injury = models.DateField() time_of_injury = models.TimeField(blank=True, null=True) date_of_operation = models.DateField(blank=True, null=True) time_of_operation = models.TimeField(blank=True, null=True) additional_notes = AdditionalNotesField(blank=True, null=True) def __str__(self): return "{0} {1}".format(self.first_name, self.last_name) class Meta: ordering = ('last_name', 'first_name')
FinnStutzenstein/OpenSlides
server/openslides/assignments/migrations/0016_negative_votes.py
Python
mit
2,395
0.000418
# Generated by Django 2.2.15 on 2020-11-24 06:44 from decimal import Decimal import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("assignments", "0015_assignmentvote_delegated_user"), ] operations = [ migrations.AddField( model_name="assignmentpoll", name="db_amount_global_yes", field=models.DecimalField( blank=True, decimal_places=6, default=Decimal("0"), max_digits=15, null=True, validators=[django.core.validators.MinValueValidator(Decimal("-2"))], ), ), migrations.AddField( model_name="assignmentpoll", name="global_yes", field=models.BooleanField(default=True), ), migrations.AlterField( m
odel_name="assignmentpoll", name="pollmethod", field=models.CharField( choices=[ ("votes", "Yes per candidate"), ("N", "No per candidate"), ("YN", "Yes/No per candidate"), ("YNA", "Yes/No/Abstain per candidate"),
], max_length=5, ), ), migrations.AlterField( model_name="assignmentpoll", name="onehundred_percent_base", field=models.CharField( choices=[ ("YN", "Yes/No per candidate"), ("YNA", "Yes/No/Abstain per candidate"), ("Y", "Sum of votes including general No/Abstain"), ("valid", "All valid ballots"), ("cast", "All casted ballots"), ("disabled", "Disabled (no percents)"), ], max_length=8, ), ), migrations.AlterField( model_name="assignmentpoll", name="pollmethod", field=models.CharField( choices=[ ("Y", "Yes per candidate"), ("N", "No per candidate"), ("YN", "Yes/No per candidate"), ("YNA", "Yes/No/Abstain per candidate"), ], max_length=5, ), ), ]
gstiebler/odemis
src/odemis/acq/test/spot_alignment_test.py
Python
gpl-2.0
10,396
0.002982
# -*- coding: utf-8 -*- ''' Created on 25 April 2014 @author: Kimon Tsitsikas Copyright © 2013-2014 Kimon Tsitsikas, Delmic This file is part of Odemis. Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/. ''' from __future__ import division from concurrent import futures import logging import math from odemis import model import odemis from odemis import acq from odemis.acq import align, stream from odemis.dataio import hdf5 from odemis.driver.actuator import ConvertStage from odemis.util import test import os import threading import time import unittest from unittest.case import skip import weakref logging.basicConfig(format="%(asctime)s %(levelname)-7s %(module)-15s: %(message)s") logging.getLogger().setLevel(logging.DEBUG) CONFIG_PATH = os.path.dirname(odemis.__file__) + "/../../install/linux/usr/share/odemis/" SECOM_LENS_CONFIG = CONFIG_PATH + "sim/secom-sim-lens-align.odm.yaml" # 4x4 class TestAlignment(unittest.TestCase): """ Test Spot Alignment functions """ backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SECOM_LENS_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ebeam = model.getComponent(role="e-beam") cls.sed = model.getComponent(role="se-detector") cls.ccd = model.getComponent(role="ccd") cls.focus = model.getComponent(role="focus") cls.align = model.getComponent(role="align") cls.light = model.getComponent(role="light") cls.light_filter = model.getComponent(role="filter") cls.stage = model.getComponent(role="stage") # Used for OBJECTIVE_MOVE type cls.aligner_xy = ConvertStage("converter-ab", "stage", children={"orig": cls.align}, axes=["b", "a"], rotation=math.radians(45)) @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") # image for FakeCCD self.data = hdf5.read_data("../align/test/one_spot.h5") C, T, Z, Y, X = self.data[0].shape self.data[0].shape = Y, X self.fake_img = self.data[0] # @skip("skip") def test_spot_alignment(self): """ Test AlignSpot """ escan = self.ebeam ccd = self.ccd focus = self.focus f = align.AlignSpot(ccd, self.aligner_xy, escan, focus) dist, vector = f.result() self.assertAlmostEqual(dist, 2.41e-05) # @skip("faster") def test_spot_alignment_cancelled(self): """ Test AlignSpot cancellation """ escan = self.ebeam ccd = self.ccd focus = self.focus f = align.AlignSpot(ccd, self.aligner_xy, escan, focus) time.sleep(0.01) # Cancel almost after the half grid is scanned f.cancel() self.assertTrue(f.cancelled()) self.assertTrue(f.done()) with self.assertRaises(futures.CancelledError): f.result() def on_done(self, future): self.done += 1 def on_progress_update(self, future, past, left): self.past = past self.left = left self.updates += 1 def test_aligned_stream(self): """ Test the AlignedSEMStream """ # Use fake ccd in order to have just one spot ccd = FakeCCD(self, 
self.align) # first try using the metadata correction st = stream.AlignedSEMStream("sem-md", self.sed, self.sed.data, self.ebeam, ccd, self.stage, self.focus, shiftebeam=stream.MTD_MD_UPD) # we don't really care about the SEM image, so the faster the better self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0] # start one image acquisition (so it should do the calibration) f = acq.acquire([st]) received, _ = f.result() self.assertTrue(received, "No image received after 30 s") # Check the correction metadata is there md = self.sed.getMetadata() self.assertIn(model.MD_POS_COR, md) # Check the position of the image is correct pos_cor = md[model.MD_POS_COR] pos_dict = self.stage.position.value pos = (pos_dict["x"], pos_dict["y"]) exp_pos = tuple(p - c for p, c in zip(pos, pos_cor)) imd = received[0].metadata self.assertEqual(exp_pos, imd[model.MD_POS]) # Check the calibration doesn't happen again on a second acquisition bad_cor = (-1, -1) # stupid impossible value self.sed.updateMetadata({model.MD_POS_COR: bad_cor}) f = acq.acquire([st]) received, _ = f.result() self.assertTrue(received, "No image received after 10 s") # if calibration has happened (=bad), it has changed the metadata md = self.sed.getMetadata() self.assertEqual(bad_cor, md[model.MD_POS_COR], "metadata has been updated while it shouldn't have") # Check calibration happens again after a stage move f = self.stage.moveRel({"x": 100e-6})
f.result() # make sure the move is over time.slee
p(0.1) # make sure the stream had time to detect position has changed received = st.image.value f = acq.acquire([st]) received, _ = f.result() self.assertTrue(received, "No image received after 30 s") # if calibration has happened (=good), it has changed the metadata md = self.sed.getMetadata() self.assertNotEqual(bad_cor, md[model.MD_POS_COR], "metadata hasn't been updated while it should have") class FakeCCD(model.HwComponent): """ Fake CCD component that returns a spot image """ def __init__(self, testCase, align): super(FakeCCD, self).__init__("testccd", "ccd") self.testCase = testCase self.align = align self.exposureTime = model.FloatContinuous(1, (1e-6, 1000), unit="s") self.binning = model.TupleContinuous((1, 1), [(1, 1), (8, 8)], cls=(int, long, float), unit="") self.resolution = model.ResolutionVA((2160, 2560), [(1, 1), (2160, 2560)]) self.data = CCDDataFlow(self) self._acquisition_thread = None self._acquisition_lock = threading.Lock() self._acquisition_init_lock = threading.Lock() self._acquisition_must_stop = threading.Event() self.fake_img = self.testCase.fake_img def start_acquire(self, callback): with self._acquisition_lock: self._wait_acquisition_stopped() target = self._acquire_thread self._acquisition_thread = threading.Thread(target=target, name="FakeCCD acquire flow thread", args=(callback,)) self._acquisition_thread.start() def stop_acquire(self): with self._acquisition_lock: with self._acquisition_init_lock: self._acquisition_must_stop.set() def _wait_acquisition_stopped(self): """ Waits until the acquisition thread is fully finished _iff_ it was requested to stop. """ # "if" is to not wait if it's already finished
south-coast-science/scs_dfe_eng
tests/gas/afe/afe_test.py
Python
mit
2,350
0.000426
#!/usr/bin/env python3 """ Created on 15 Aug 2016 @author: Bruno Beloff (bruno.beloff@southcoastscience.com) Note: this script uses the Pt1000 temp sensor for temperature compensation. """ import time from scs_core.data.json import JSONify from scs_core.gas.afe_baseline import AFEBaseline from scs_core.gas.afe_calib import AFECalib from scs_core.gas.afe.pt1000_calib import Pt1000Calib from scs_dfe.gas.afe.afe import AFE from scs_dfe.gas.afe.pt1000 import Pt1000 from scs_dfe.interface.interface_conf import InterfaceConf from scs_host.bus.i2c import I2C from scs_host.sys.host import Host # -------------------------------------------------------------------------------------------------------------------- try: I2C.Sensors.open() interface_conf = InterfaceConf.load(Host) print(interface_conf) print("-") interface = interface_conf.interface() print(interface) print("-") pt1000_calib = Pt1000Calib.load(Host) print(pt1000_calib) print("-") pt1000 = Pt1000(pt1000_calib) print(pt1000) print("-") afe_calib = AFECalib.load(Host
) print(afe_calib) print("-") afe_baseline = AFEBaseline.load(Host) print(afe_baseline) print("-") sensors = afe_calib.sensors(afe_baseline) print('\n\n'.join(str(sensor) for sensor in sensors)) print("-") # ---------------------------------------------------------------------------------------------------------------- afe = AFE(interface, pt1000, sensors) print
(afe) print("-") start_time = time.time() temp = afe.sample_pt1000() elapsed = time.time() - start_time print(temp) print("elapsed:%0.3f" % elapsed) print("-") start_time = time.time() sample = afe.sample_station(1) elapsed = time.time() - start_time print("SN1: %s" % sample) print("elapsed:%0.3f" % elapsed) print("-") start_time = time.time() sample = afe.sample_station(4) elapsed = time.time() - start_time print("SN4: %s" % sample) print("elapsed:%0.3f" % elapsed) print("=") start_time = time.time() samples = afe.sample() elapsed = time.time() - start_time print(samples) print("elapsed:%0.3f" % elapsed) print("-") jstr = JSONify.dumps(samples) print(jstr) print("-") finally: I2C.Sensors.close()
rcatlin/ryancatlin-info
Api/app/migrations/0003_remove_tag_articles.py
Python
mit
388
0
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-08-20 01:22 from __future__ import unicode_literals from dj
ango.db import
migrations class Migration(migrations.Migration): dependencies = [ ('app', '0002_auto_20170819_2342'), ] operations = [ migrations.RemoveField( model_name='tag', name='articles', ), ]
cnbird1999/ava
ava/core_identity/models.py
Python
gpl-2.0
4,406
0.001816
from django.db import models from django.core.validators import validate_email, validate_slug, validate_ipv46_address from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from ava.core.models import TimeStampedModel from ava.core_group.models import Group from ava.core_identity.validators import validate_skype, validate_twitter class Identity(TimeStampedModel): # An identity is an online persona that can map to a single person, a group # of people, or an automated service. GROUP = 'GROUP' PERSON = 'PERSON' IDENTITY_TYPE_CHOICES = ( (GROUP, 'Group'), (PERSON, 'Person'), ) name = models.CharField(max_length=100, verbose_name='Name', null=True, blank=True) description = models.TextField(max_length=500, verbose_name='Description', null=True, blank=True) identity_typ
e = models.CharField(max_length=10, choices=IDENTITY_TYPE_CHOICES,
default=PERSON, verbose_name='Identity Type') groups = models.ManyToManyField(Group, blank=True, related_name='identities') def __str__(self): return self.name or '' def get_absolute_url(self): return reverse('identity-detail', kwargs={'pk': self.id}) class Meta: verbose_name = 'identity' verbose_name_plural = 'identities' ordering = ['name'] class Person(TimeStampedModel): first_name = models.CharField(max_length=75, validators=[validate_slug]) surname = models.CharField(max_length=75, validators=[validate_slug]) identity = models.ManyToManyField('Identity', blank=True) def __str__(self): return (self.first_name + " " + self.surname).strip() or '' def get_absolute_url(self): return reverse('person-detail', kwargs={'pk': self.id}) class Meta: verbose_name = 'person' verbose_name_plural = 'people' ordering = ['surname', 'first_name'] class Identifier(TimeStampedModel): """ TODO: DocString """ EMAIL = 'EMAIL' SKYPE = 'SKYPE' IP = 'IPADD' UNAME = 'UNAME' TWITTER = 'TWITTER' NAME = 'NAME' IDENTIFIER_TYPE_CHOICES = ( (EMAIL, 'Email Address'), (SKYPE, 'Skype ID'), (IP, 'IP Address'), (UNAME, 'Username'), (TWITTER, 'Twitter ID'), (NAME, 'Other name'), ) identifier = models.CharField(max_length=100) identifier_type = models.CharField(max_length=10, choices=IDENTIFIER_TYPE_CHOICES, default=EMAIL, verbose_name='Identifier Type') identity = models.ForeignKey('Identity', related_name='identifiers') def __str__(self): return self.identifier or '' def get_absolute_url(self): return reverse('identifier-detail', kwargs={'pk': self.id}) def clean(self): if self.identifier_type is 'EMAIL': try: validate_email(self.identifier) except ValidationError: raise ValidationError('Identifier is not a valid email address') if self.identifier_type is 'IPADD': try: validate_ipv46_address(self.identifier) except ValidationError: raise ValidationError('Identifier is not a valid IPv4/IPv6 address') if self.identifier_type is 'UNAME' or self.identifier_type is 'NAME': try: validate_slug(self.identifier) except ValidationError: raise ValidationError('Identifier is not a valid username or name') if self.identifier_type is 'SKYPE': try: validate_skype(self.identifier) except ValidationError: raise ValidationError('Identifier is not a valid Skype user name') if self.identifier_type is 'TWITTER': try: validate_twitter(self.identifier) except ValidationError: raise ValidationError('Identifier is not a valid Twitter user name') class Meta: unique_together = ("identifier", "identifier_type", "identity") ordering = ['identifier', 'identifier_type']
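The clean() method above dispatches on identifier_type and runs the value through the matching validator. A condensed standalone sketch of that dispatch using a lookup table and ordinary string equality (the original's 'is' comparisons against string literals depend on interning and are conventionally written with '=='); only two identifier types are shown and Django is assumed to be importable.

from django.core.exceptions import ValidationError
from django.core.validators import validate_email, validate_ipv46_address

VALIDATORS = {
    'EMAIL': (validate_email, 'Identifier is not a valid email address'),
    'IPADD': (validate_ipv46_address, 'Identifier is not a valid IPv4/IPv6 address'),
}

def check_identifier(identifier, identifier_type):
    # Look up the validator for this type; unknown types pass through unchecked.
    entry = VALIDATORS.get(identifier_type)
    if entry is None:
        return
    validator, message = entry
    try:
        validator(identifier)
    except ValidationError:
        raise ValidationError(message)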
sebrandon1/nova
nova/tests/unit/test_cinder.py
Python
apache-2.0
9,333
0.000107
# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from cinderclient.v1 import client as cinder_client_v1 from cinderclient.v2 import client as cinder_client_v2 from requests_mock.contrib import fixture from testtools import matchers import nova.conf from nova import context from nova import exception from nova import test from nova.volume import cinder CONF = nova.conf.CONF _image_metadata = { 'kernel_id': 'fake', 'ramdisk_id': 'fake' } _volume_id = "6edbc2f4-1507-44f8-ac0d-eed1d2608d38" _instance_uuid = "f4fda93b-06e0-4743-8117-bc8bcecd651b" _instance_uuid_2 = "f4fda93b-06e0-4743-8117-bc8bcecd651c" _attachment_id = "3b4db356-253d-4fab-bfa0-e3626c0b8405" _attachment_id_2 = "3b4db356-253d-4fab-bfa0-e3626c0b8406" _device = "/dev/vdb" _device_2 = "/dev/vdc" _volume_attachment = \ [{"server_id": _instance_uuid, "attachment_id": _attachment_id, "host_name": "", "volume_id": _volume_id, "device": _device, "id": _volume_id }] _volume_attachment_2 = _volume_attachment _volume_attachment_2.append({"server_id": _instance_uuid_2, "attachment_id": _attachment_id_2, "host_name": "", "volume_id": _volume_id, "device": _device_2, "id": _volume_id}) exp_volume_attachment = collections.OrderedDict() exp_volume_attachment[_instance_uuid] = {'attachment_id': _attachment_id, 'mountpoint': _device} exp_volume_attachment_2 = exp_volume_attachment exp_volume_attachment_2[_instance_uuid_2] = {'attachment_id': _attachment_id_2, 'mountpoint': _device_2} class BaseCinderTestCase(object): def setUp(self): super(BaseCinderTestCase, self).setUp() cinder.reset_globals() self.requests = self.useFixture(fixture.Fixture()) self.api = cinder.API() self.context = context.RequestContext('username', 'project_id', auth_token='token', service_catalog=self.CATALOG) def flags(self, *args, **kwargs): super(BaseCinderTestCase, self).flags(*args, **kwargs) cinder.reset_globals() def create_client(self): return cinder.cinderclient(self.context) def test_context_with_catalog(self): self.assertEqual(self.URL, self.create_client().client.get_endpoint()) def test_cinder_http_retries(self): retries = 42 self.flags(http_retries=retries, group='cinder')
self.assertEqual(retries, self.create_client().client.connect_retries) def test_cinder_api_ins
ecure(self): # The True/False negation is awkward, but better for the client # to pass us insecure=True and we check verify_cert == False self.flags(insecure=True, group='cinder') self.assertFalse(self.create_client().client.session.verify) def test_cinder_http_timeout(self): timeout = 123 self.flags(timeout=timeout, group='cinder') self.assertEqual(timeout, self.create_client().client.session.timeout) def test_cinder_api_cacert_file(self): cacert = "/etc/ssl/certs/ca-certificates.crt" self.flags(cafile=cacert, group='cinder') self.assertEqual(cacert, self.create_client().client.session.verify) class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase): """Test case for cinder volume v1 api.""" URL = "http://localhost:8776/v1/project_id" CATALOG = [{ "type": "volumev2", "name": "cinderv2", "endpoints": [{"publicURL": URL}] }] def create_client(self): c = super(CinderTestCase, self).create_client() self.assertIsInstance(c, cinder_client_v1.Client) return c def stub_volume(self, **kwargs): volume = { 'display_name': None, 'display_description': None, "attachments": [], "availability_zone": "cinder", "created_at": "2012-09-10T00:00:00.000000", "id": _volume_id, "metadata": {}, "size": 1, "snapshot_id": None, "status": "available", "volume_type": "None", "bootable": "true", "multiattach": "true" } volume.update(kwargs) return volume def test_cinder_endpoint_template(self): endpoint = 'http://other_host:8776/v1/%(project_id)s' self.flags(endpoint_template=endpoint, group='cinder') self.assertEqual('http://other_host:8776/v1/project_id', self.create_client().client.endpoint_override) def test_get_non_existing_volume(self): self.requests.get(self.URL + '/volumes/nonexisting', status_code=404) self.assertRaises(exception.VolumeNotFound, self.api.get, self.context, 'nonexisting') def test_volume_with_image_metadata(self): v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata) m = self.requests.get(self.URL + '/volumes/5678', json={'volume': v}) volume = self.api.get(self.context, '5678') self.assertThat(m.last_request.path, matchers.EndsWith('/volumes/5678')) self.assertIn('volume_image_metadata', volume) self.assertEqual(_image_metadata, volume['volume_image_metadata']) class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase): """Test case for cinder volume v2 api.""" URL = "http://localhost:8776/v2/project_id" CATALOG = [{ "type": "volumev2", "name": "cinder", "endpoints": [{"publicURL": URL}] }] def setUp(self): super(CinderV2TestCase, self).setUp() CONF.set_override('catalog_info', 'volumev2:cinder:publicURL', group='cinder') self.addCleanup(CONF.reset) def create_client(self): c = super(CinderV2TestCase, self).create_client() self.assertIsInstance(c, cinder_client_v2.Client) return c def stub_volume(self, **kwargs): volume = { 'name': None, 'description': None, "attachments": [], "availability_zone": "cinderv2", "created_at": "2013-08-10T00:00:00.000000", "id": _volume_id, "metadata": {}, "size": 1, "snapshot_id": None, "status": "available", "volume_type": "None", "bootable": "true", "multiattach": "true" } volume.update(kwargs) return volume def test_cinder_endpoint_template(self): endpoint = 'http://other_host:8776/v2/%(project_id)s' self.flags(endpoint_template=endpoint, group='cinder') self.assertEqual('http://other_host:8776/v2/project_id', self.create_client().client.endpoint_override) def test_get_non_existing_volume(self): self.requests.get(self.URL + '/volumes/nonexisting', status_code=404) self.assertRaises(exception.VolumeNotFound, self.api.get, self.context, 
'nonexisting') def test_volume_with_image_metadata(self): v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata) self.requests.get(self.URL + '/volumes/5678', json={'volume': v}) volume = self.ap
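A minimal sketch of the requests_mock stubbing pattern the cinder tests above rely on; the URL shape mirrors the catalog endpoint, but the payload here is made up for illustration and is not a nova fixture:

import requests
import requests_mock

URL = "http://localhost:8776/v2/project_id"

with requests_mock.Mocker() as m:
    # Register a canned response for a volume lookup.
    m.get(URL + "/volumes/5678", json={"volume": {"id": "5678", "status": "available"}})
    resp = requests.get(URL + "/volumes/5678")
    assert resp.json()["volume"]["status"] == "available"
    # The mock also records requests, which is how the tests above inspect last_request.
    assert m.last_request.path.endswith("/volumes/5678")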
jacebrowning/doorstop
doorstop/core/base.py
Python
lgpl-3.0
11,112
0
# SPDX-License-Identifier: LGPL-3.0-only """Base classes and decorators for the doorstop.core package.""" import abc import functools import os from typing import Dict import yaml from doorstop import common, settings from doorstop.common import DoorstopError, DoorstopInfo, DoorstopWarning log = common.logger(__name__) def add_item(func): """Add and cache the returned item.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): item = func(self, *args, **kwargs) or self if settings.ADDREMOVE_FILES and item.tree: item.tree.vcs.add(item.path) # pylint: disable=W0212 if item not in item.document._items: item.document._items.append(item) if settings.CACHE_ITEMS and item.tree: item.tree._item_cache[item.uid] = item log.trace("cached item: {}".format(item)) # type: ignore return item return wrapped def edit_item(func): """Mark the returned item as modified.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): item = func(self, *args, **kwargs) or self if settings.ADDREMOVE_FILES and item.tree: item.tree.vcs.edit(item.path) return item return wrapped def delete_item(func): """Remove and expunge the returned item.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): item = func(self, *args, **kwargs) or self if settings.ADDREMOVE_FILES and item.tree: item.tree.vcs.delete(item.path) # pylint: disable=W0212 if item in item.document._items: item.document._items.remove(item) if settings.CACHE_ITEMS and item.tree: item.tree._item_cache[item.uid] = None log.trace("expunged item: {}".format(item)) # type: ignore BaseFileObject.delete(item, item.path) return item return wrapped def add_document(func): """Add and cache the returned document.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): document = func(self, *args, **kwargs) or self if settings.ADDREMOVE_FILES and document.tree: document.tree.vcs.add(document.config) # pylint: disable=W0212 if settings.CACHE_DOCUMENTS and document.tree: document.tree._document_cache[document.prefix] = document log.trace("cached document: {}".format(document)) # type: ignore return document return wrapped def edit_document(func): """Mark the returned document as modified.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): document = func(self, *args, **kwargs) or self if settings.ADDREMOVE_FILES and document.tree: document.tree.vcs.edit(document.config) return document return wrapped def delete_document(func): """Remove and expunge the returned document.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): document = func(self, *args, **kwargs) or self if settings.ADDREMOVE_FILES and document.tree: document.tree.vcs.delete(document.config) # pylint: disable=W0212 if settings.CACHE_DOCUMENTS and document.tree: document.tree._document_cache[document.prefix] = None log.trace("expunged document: {}".format(document)) # type: ignore try: os.rmdir(document.path) except OSError: # Directory wasn't empty pass return document return wrapped class BaseValidatable(metaclass=abc.ABCMeta): """Abstract Base Class for objects that can be validated.""" def validate(self, skip=None, document_hook=None, item_hook=None): """Check the object for validity. 
:param skip: list of document prefixes to skip :param document_hook: function to call for custom document validation :param item_hook: function to call for custom item validation :return: indication that the object is valid """ valid = True # Display all issues for issue in self.get_issues( skip=skip, document_hook=document_hook, item_hook=item_hook ): if isinstance(issue, DoorstopInfo) and not settings.WARN_ALL: log.info(issue) elif isinstance(issue, DoorstopWarning) and not settings.ERROR_ALL: log.warning(issue) else: assert isinstance(issue, DoorstopError) log.error(issue) valid = False # Return the result return valid @abc.abstractmethod def get_issues(self, skip=None, document_hook=None, item_hook=None): """Yield all the objects's issues. :param skip: list of document prefixes to skip :param document_hook: function to call for custom document validation :param item_hook: function to call for custom item validation :return: generator of :class:`~doorstop.common.DoorstopError`, :class:`~doorstop.common.DoorstopWarning`, :class:`~doorstop.common.DoorstopInfo` """ @property def issues(self): """Get a list of the item's issues.""" return list(self.get_issues()) def auto_load(func): """Call self.load() before execution.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): self.load() return func(self, *args, **kwargs) return wrapped def auto_save(func): """Call self.save() after execution.""" @functools.wraps(func) def wrapped(self, *args, **kwargs): result = func(self, *args, **kwargs)
if self.auto: self.save() return result return wrapped class BaseFileObject(metaclass=abc.ABCMeta): """Abstract Base Class for objects whose attributes save to a file. For properties that are saved to a file, decorate their getters with :func:`auto_load` and their setters with :func:`auto_save`. """ auto = True # set to False to delay
automatic save until explicit save def __init__(self): self.path = None self.root = None self._data: Dict[str, str] = {} self._exists = True self._loaded = False def __hash__(self): return hash(self.path) def __eq__(self, other): return isinstance(other, self.__class__) and self.path == other.path def __ne__(self, other): return not self == other @staticmethod def _create(path, name): """Create a new file for the object. :param path: path to new file :param name: humanized name for this file :raises: :class:`~doorstop.common.DoorstopError` if the file already exists """ if os.path.exists(path): raise DoorstopError("{} already exists: {}".format(name, path)) common.create_dirname(path) common.touch(path) @abc.abstractmethod def load(self, reload=False): """Load the object's properties from its file.""" # 1. Start implementations of this method with: if self._loaded and not reload: return # 2. Call self._read() and update properties here # 3. End implementations of this method with: self._loaded = True def _read(self, path): """Read text from the object's file. :param path: path to a text file :return: contexts of text file """ if not self._exists: msg = "cannot read from deleted: {}".format(self.path) raise DoorstopError(msg) return common.read_text(path) @staticmethod def _load(text, path, **kwargs): """Load YAML data from text. :param text: text read from a file :param path: path to the file (for displaying errors) :return: dictionary of YAML data """ return common.load_yaml(text, path, **kwargs) @abc.abstractmethod def save(self): """Format and save the object's properties to its file.
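A self-contained sketch of the auto_load/auto_save decorator pattern defined above; Note is a hypothetical class used only to show the idea and is not a doorstop type:

import functools

def auto_load(func):
    """Call self.load() before the wrapped method runs."""
    @functools.wraps(func)
    def wrapped(self, *args, **kwargs):
        self.load()
        return func(self, *args, **kwargs)
    return wrapped

def auto_save(func):
    """Call self.save() after the wrapped method, when auto-saving is enabled."""
    @functools.wraps(func)
    def wrapped(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        if self.auto:
            self.save()
        return result
    return wrapped

class Note:
    auto = True   # set to False to batch changes and save explicitly

    def __init__(self, path):
        self.path = path
        self._loaded = False
        self._text = ""

    def load(self, reload=False):
        if self._loaded and not reload:
            return
        try:
            with open(self.path, encoding="utf-8") as f:
                self._text = f.read()
        except FileNotFoundError:
            self._text = ""
        self._loaded = True

    def save(self):
        with open(self.path, "w", encoding="utf-8") as f:
            f.write(self._text)

    @property
    @auto_load
    def text(self):
        return self._text

    @text.setter
    @auto_save
    def text(self, value):
        self._text = value

# Usage: Note("/tmp/demo.txt").text = "hello"  - writes trigger save(), reads trigger load()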
zhaoace/codecraft
python/projects/learnpythonthehardway.org/ex48/setup.py
Python
unlicense
434
0.002304
try: from setuptools import setup ex
cept ImportError: from distutils.core import setup config = { 'description': 'ex48', 'author': 'Zhao, Li', 'url': 'URL to get it at.', 'download_url': 'Where to download it.', 'author_email': 'zhaoace@gmail.com', 'version': '0.1', 'install_requires': ['nose'], 'packages': ['ex48'], 'scripts': [],
'name': 'ex48' } setup(**config)
tanglei528/ceilometer
ceilometer/tests/dispatcher/test_file.py
Python
apache-2.0
3,762
0
# -*- encoding: utf-8 -*- # # Copyright © 2013 IBM Corp # # Author: Tong Li <litong01@us.ibm.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging.handlers import os import tempfile from ceilometer.dispatcher import file from ceilometer.openstack.common.fixture import config from ceilometer.openstack.common import test from ceilometer.publisher import utils class TestDispatcherFile(test.BaseTestCase): def setUp(self): super(TestDispatcherFile, self).setUp() self.CONF = self.useFixture(config.Config()).conf def test_file_dispatcher_with_all_config(self): # Create a temporaryFile to get a file name tf = tempfile.NamedTemporaryFile('r') filename = tf.name tf.close() self.CONF.dispatcher_file.file_path = filename self.CONF.dispatcher_file.max_bytes = 50 self.CONF.dispatcher_file.backup_count = 5 dispatcher = file.FileDispatcher(self.CONF) # The number of the handlers should be 1 self.assertEqual(1, len(dispatcher.log.handlers)) # The handler should be RotatingFileHandler handler = dispatcher.log.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.metering_secret, ) # The record_metering_data method should exist and not produce errors. dispatcher.recor
d_metering_data(msg) # After the method call above, the file should have been created. self.assertTrue(os.path.exists(handler.baseFilename)) def test_file_dispatcher_with_path_only(self): # Create a temporaryFile to get a file name tf = tempfile.NamedTemporaryFile
('r') filename = tf.name tf.close() self.CONF.dispatcher_file.file_path = filename self.CONF.dispatcher_file.max_bytes = None self.CONF.dispatcher_file.backup_count = None dispatcher = file.FileDispatcher(self.CONF) # The number of the handlers should be 1 self.assertEqual(1, len(dispatcher.log.handlers)) # The handler should be a plain FileHandler (no rotation options were set) handler = dispatcher.log.handlers[0] self.assertIsInstance(handler, logging.FileHandler) msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.metering_secret, ) # The record_metering_data method should exist and not produce errors. dispatcher.record_metering_data(msg) # After the method call above, the file should have been created. self.assertTrue(os.path.exists(handler.baseFilename)) def test_file_dispatcher_with_no_path(self): self.CONF.dispatcher_file.file_path = None dispatcher = file.FileDispatcher(self.CONF) # The log should be None self.assertIsNone(dispatcher.log)
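The two tests above pin down a simple selection rule: rotate only when a size limit is configured. A sketch of that rule (not ceilometer's actual implementation; build_log_handler is a made-up name):

import logging
import logging.handlers

def build_log_handler(file_path, max_bytes=None, backup_count=None):
    # Rotation only applies when a size limit is set; otherwise a plain FileHandler will do.
    if max_bytes:
        return logging.handlers.RotatingFileHandler(
            file_path, maxBytes=max_bytes, backupCount=backup_count or 0)
    return logging.FileHandler(file_path)

log = logging.getLogger("dispatcher_sketch")
log.addHandler(build_log_handler("/tmp/metering.log", max_bytes=50, backup_count=5))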
strogo/djpcms
runtests.py
Python
bsd-3-clause
722
0.012465
#!/usr/bin/env python import os import sys from optparse import OptionParser def makeoptions(): parser = OptionParser() parser.add_option("-v", "--verbosity",
type = int, action="store", dest="verbosity", default=1, help="Tests verbosity level, one of 0, 1, 2 or 3") return parser if __name__ == '__main__': import djpcms import sys options, tags = makeoptions().parse_args() verbosity = options.verbosity p = os.path path = p.join(p.split(p.abspath(__file__))[0],'tests') sys.path.insert(0, path) from testrunner impor
t run run(tags, verbosity = verbosity)
iamharshit/ML_works
Photo Painter/NN.py
Python
mit
2,722
0.013226
import tensorflow as tf import numpy as np import cv2 img_original = cv2.imread('jack.jpg') #data.camera() img = cv2.resize(img_original, (64*5,64*5)) # for positions xs = [] # for corresponding colors ys = [] for row_i in range(img.shape[0]): for col_i in range(img.shape[1]): xs.append([row_i, col_i]) ys.append(img[row_i, col_i]) # list->numpy array xs,ys = np.array(xs),np.array(ys) # normalising input positions xs = (xs-np.mean(xs))/np.std(xs) # placeholders for input and output X = tf.placeholder(tf.float32, shape=[None, 2], name='X') Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y') #defining weights,bias,non-linearity def linear(X, n_input, n_output, activation=None, scope=None): with tf.variable_scope(scope or "linear"): W = tf.get_variable( name='W', shape=[n_input, n_output], initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1)) b = tf.get_variable( name='b', shape=[n_output], initializer=tf.constant_initializer()) h = tf.matmul(X, W) + b if activation is not None: h = activation(h) return h #building neural-net: 2 inputs, six hidden layers of 64 units, 3 colour outputs n_neurons = [2,64,64,64,64,64,64,3] #defining per-channel distance used in the cost def distance(p1, p2): return tf.abs(p1 - p2) #building network current_input = X for layer_i in range(1, len(n_neurons)): current_input = linear( X=current_input, n_input=n_neurons[layer_i - 1], n_output=n_neurons[layer_i], activation=tf.nn.relu if (layer_i+1) < len(n
_neurons) else None, scope='layer_' + str(layer_i)) Y_pred = current_input cost = tf.reduce_mean(tf.reduce_sum(distance(Y_pred,Y),1) ) optimizer = tf.train.AdamOptimizer(0.001).minimize(cost) #training Neural Net n_iterations = 500 batch_size = 50 with tf.Session() as sess: sess.run(tf.initialize_all_variables()) prev_training_cost = 0.0 for it_i in range(n_iter
ations): idxs = np.random.permutation(range(len(xs))) n_batches = len(idxs) // batch_size for batch_i in range(n_batches): idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size] sess.run(optimizer, feed_dict={X: xs[idxs_i], Y: ys[idxs_i] }) training_cost = sess.run(cost, feed_dict={X: xs, Y: ys}) print(it_i, training_cost) if (it_i + 1) % 20 == 0: ys_pred = Y_pred.eval(feed_dict={X: xs}, session=sess) print(ys_pred.shape, img.shape) print(ys_pred) img = np.clip(ys_pred.reshape(img.shape), 0, 255).astype(np.uint8) cv2.imwrite("face____" + str(it_i) + ".jpg", img)
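The training loop above reshuffles indices every epoch and slices fixed-size batches; a stand-alone version of that batching helper (the function name is my own, not from the script):

import numpy as np

def iterate_minibatches(xs, ys, batch_size=50):
    """Yield (inputs, targets) batches in a fresh random order on every call."""
    idxs = np.random.permutation(len(xs))
    # Drop the ragged tail, matching n_batches = len(idxs) // batch_size above.
    for start in range(0, len(idxs) - batch_size + 1, batch_size):
        batch = idxs[start:start + batch_size]
        yield xs[batch], ys[batch]

# xs is (N, 2) pixel coordinates and ys is (N, 3) colours, as in the script above.
xs_demo = np.random.rand(1000, 2)
ys_demo = np.random.rand(1000, 3)
for batch_x, batch_y in iterate_minibatches(xs_demo, ys_demo):
    assert batch_x.shape == (50, 2) and batch_y.shape == (50, 3)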
alirizakeles/tendenci
tendenci/apps/events/widgets.py
Python
gpl-3.0
4,968
0.00161
from django import forms from django.core.urlresolvers import reverse from django.forms.widgets import RadioFieldRenderer from django.utils.encoding import force_text from django.utils.html import format_html from django.utils.safestring import mark_safe class BootstrapChoiceFieldRenderer(RadioFieldRenderer): """ An object used by RadioSelect to enable customization of radio widgets. """ def render(self): """ Outputs a <div> for this set of choice fields. If an id was given to the field, it is applied to the <div> (each item in the list will get an id of `$id_$i`). """ id_ = self.attrs.get('id', None) start_tag = format_html('<div id="{0}">', id_) if id_ else '<div>' output = [start_tag] for widget in self: output.append(format_html('<div class="radio">{0}</div>', force_text(widget))) output.append('</div>') return mark_safe('\n'.join(output)) class UseCustomRegWidget(forms.MultiWidget): """ This widget is for three fields on event add/edit under Registration: * use_custom_reg_form * reg_form * bind_reg_form_to_conf_only """ def __init__(self, attrs=None, reg_form_choices=None, event_id=None): self.attrs = attrs self.reg_form_choices = reg_form_choices self.event_id = event_id if not self.attrs: self.attrs = {'id': 'use_custom_reg'} self.widgets = ( forms.CheckboxInput(), forms.Select(attrs={'class': 'form-control'}), forms.RadioSelect(renderer=BootstrapChoiceFieldRenderer) ) super(UseCustomRegWidget, self).__init__(self.widgets, attrs) def render(self, name, value, attrs=None): if not isinstance(value, list): value = self.decompress(value) final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) use_custom_reg_form_widget = self.widgets[0] rendered_use_custom_reg_form = self.render_widget( use_custom_reg_form_widget, name, value, final_attrs, 0, id_ ) reg_form_widget = self.widgets[1] reg_form_widget.choices = self.reg_form_choices #reg_form_widget.attrs = {'size':'8'} rendered_reg_form = self.render_widget( reg_form_widget, name, value, final_attrs, 1, id_ ) bind_reg_form_to_conf_only_widget = self.widgets[2] choices = ( ('1', mark_safe('Use one form for all pricings %s' % rendered_reg_form)), ) bind_reg_form_to_conf_only_widget.choices = choices rendered_bind_reg_form_to_conf_only = self.render_widget( bind_reg_form_to_conf_only_widget, name, value, final_attrs, 2, id_ ) rendered_bind_reg_form_to_conf_only = rendered_bind_reg_form_to_conf_only.replace( '%s</label>' % rendered_reg_form, "</label>%s" % rendered_reg_form ) if self.event_id: manage_custom_reg_link = """ <div> <a href="%s" target="_blank">Manage Custom Registration Form</a> </div> """ % reverse('event.event_custom_reg_form_list', args=[self.event_id]) else:
manage_custom_reg_link = '' output_html = """ <div id="t-events-use-customreg-box"> <div id="t-events-use-customreg-checkbox" class="checkbox"> <label for="id_%s_%s">%s Use Custom Registration Form</label> </div> <div id="t-events-one-or-separate-form">%s</div> %s </div> """ % ( name, '0', r
endered_use_custom_reg_form, rendered_bind_reg_form_to_conf_only, manage_custom_reg_link ) return mark_safe(output_html) def render_widget(self, widget, name, value, attrs, index=0, id=None): i = index id_ = id if value: try: widget_value = value[i] except IndexError: self.fields['use_reg_form'].initial = None else: widget_value = None if id_: final_attrs = dict(attrs, id='%s_%s' % (id_, i)) if widget.__class__.__name__.lower() != 'select': classes = final_attrs.get('class', None) if classes: classes = classes.split(' ') classes.remove('form-control') classes = ' '.join(classes) final_attrs['class'] = classes return widget.render(name+'_%s' %i, widget_value, final_attrs) def decompress(self, value): if value: data_list = value.split(',') if data_list[0] == '1': data_list[0] = 'on' return data_list return None
seravok/LPTHW
StudyDrillMath.py
Python
gpl-3.0
590
0
# Prints exactly what the script is about to do print "How many keys are there for the swedish alph
abet?" # Prints the amount of the top row print "The top row has 11 letter keys" # Assigns a value to top top = 11.0 # Prints the amount of the middle row print "The middle row has 11 letter keys" # Assigns a value to middle middle = 11 # Prints the amount of the bottom row print "The bottom row has 7 letter keys" # Assigns a value t
o bottom bottom = 7 # Prints text then the combined value from the three rows print "The total number of letter keys are ", top + middle + bottom
xhava/hippyvm
testing/test_options.py
Python
mit
3,254
0.001537
import py import re from testing.test_interpreter import BaseTestInterpreter from testing.test_main import TestMain from hippy.main import entry_point class TestOptionsMain(TestMain): def test_version_compare(self, capfd): output = self.run('''<?php $versions = array( '1', '1.0', '1.01', '1.1', '1.10', '1.10b', '1.10.0', '-3.2.1', '1rc.0.2', 'bullshit.rc.9.2beta', ); foreach ($versions as $version) { if (isset($last)) { $comp = version_compare($last, $version); echo $comp; } $last = $version; } ?>''', capfd) assert output == "-1-10-11-11-11" def test_version_compare_with_cmp(self, capfd): output = self.run('''<?php $versions = array( '1', '1.0', '1.01', '1.1', '1.10', '1.10b', '1.10.0', '-3.2.1', '1rc.0.2', 'bullshit.rc.9.2beta', ); $co = array( '=', '==', 'eq', '!=', '<>', 'ne', '>', 'gt', '<', 'lt', '>=', 'ge', '<=', 'le', ); foreach ($versions as $version) { if (isset($last)) { foreach ($co as $c) { $comp = version_compare($last, $version, $c); echo (int)$comp; } } $last = $version; } ?>''', capfd) assert output == "000111001100110001110011001111100000001111000111001100110001111100110000011100110011000111110011000001110011001100011111001100" class TestOptionsFunc(BaseTestInterpreter): def test_get_cfg_var(self): php_version = "6.0" test_value = "test_value" space = self.space def setup_conf(interp): interp.config.ini.update({ 'php_version': space.wrap(php_version), 'test_value': space.wrap(test_value), }) output = self.run(''' echo get_cfg_var('php_version'); echo get_cfg_var('test_value'); ''', extra_func=setup_
conf) assert self.space.str_w(output[0]) == php_version assert self.space.str_w(output[1]) == test_value def test_get_cfg_var2(self): output = self.run(''' echo get_cfg_var(''); echo get_cfg_var(' '); echo get_cfg_var('non_existent_var'); echo get_cfg_var(null); echo get_cfg_var(1); echo get_cfg_var(1.0
); ''') assert all([o == self.space.w_False for o in output]) def test_get_cfg_var3(self): with self.warnings() as w: output = self.run(''' echo get_cfg_var(array(1)); class Test {}; echo get_cfg_var(new Test); ''') assert output[0] == self.space.w_Null assert output[1] == self.space.w_Null assert w[0] == 'Warning: get_cfg_var() ' +\ 'expects parameter 1 to be string, array given' assert w[1] == 'Warning: get_cfg_var() ' +\ 'expects parameter 1 to be string, object given'
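The long expected strings above are just the results of comparing each version with its predecessor, concatenated in order. A small helper (not hippyvm code, with a naive stand-in comparator since PHP's version_compare has its own parsing rules) shows how such a string is assembled:

def pairwise_results(versions, compare):
    """Concatenate compare(previous, current) for every adjacent pair, as the PHP loop does."""
    out = []
    last = None
    for version in versions:
        if last is not None:
            out.append(str(compare(last, version)))
        last = version
    return "".join(out)

def naive_compare(a, b):
    # Placeholder: plain string ordering, NOT PHP's version_compare semantics.
    return (a > b) - (a < b)

print(pairwise_results(["1", "1.0", "1.01", "1.1"], naive_compare))   # "-1-1-1" with this comparator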
ereOn/azmq
tests/unit/test_mechanisms/test_base.py
Python
gpl-3.0
2,577
0
""" Unit tests for the base mechanism class. """ import pytest from azmq.mechanisms.base import Mechanism from azmq.errors import ProtocolError @pytest.mark.asyncio async def test_expect_command(reader): reader.write(b'\x04\x09\x03FOOhello') reader.seek(0) result = await Mechanism._expect_command(reader=reader, name=b'FOO') assert result == b'hello' @pytest.mark.asyncio async def test_expect_command_large(reader): reader.write(b'\x06\x00\x00\x00\x00\x00\x00\x00\x09\x03FOOhello') reader.seek(0) result = await Mechanism._expect_command(reader=reader, name=b'FOO') assert result == b'hello' @pytest.mark.asyncio async def test_expect_command_invalid_size_type(reader): reader.write(b'\x03') reader.seek(0) with pytest.raises(ProtocolError): await Mechanism._expect_command(reader=reader, name=b'FOO') @pytest.mark.asyncio async def test_expect_command_invalid_name_size(reader): reader.write(b'\x04\x09\x04HELOhello') reader.seek(0) with pytest.raises(ProtocolError): await Mechanism._expect_command(reader=reader, name=b'FOO') @pytest.mark.asyncio async def test_expect_command_invalid_name(reader): reader.write(b'\x04\x08\x03BARhello') reader.seek(0) with pytest.raises(ProtocolError): await Mechanism._expect_command(reader=reader, name=b'FOO') @pytest.mark.asyncio async def test_read_frame(reader): reader.write(b'\x00\x03foo') reader.seek(0) async def on_command(name, data): assert False result = await Mechanism.read(reader=reader, on_command=on_command) assert result == (b'foo', True) @pytest.mark.asyncio async def test_read_frame_large(reader): reader.write(b'\x02\x00\x00\x00\x00\x00\x00\x00\x03foo') reader.seek(0) async def on_command(name, data): assert False result = await Mechanism.read(reader=reader, on_command=on_command) assert result == (b'foo', True) @pytest.mark.asyncio async def test_read_command(reader): reader.write(b'\x04\x09\x03BARhello\x00\x03foo') reader.seek(0) async def on_command(name, data): assert name == b'BAR' assert data == b'hello' result = await Mechanism.read(reader=reader, on_command=on_command) assert result == (b'foo', True) @pytest.mark.asyncio async def test_read_invalid_size_type(reader): reader.write
(b'\x09') reader.seek(0) async def on_command(name, data): assert False with pytest.raises(ProtocolError): await Mechanism.read(reader=reader, on_command=
on_command)
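The byte literals fed to the reader above follow the ZMTP command framing these tests check: a flag byte, a size, a one-byte name length, the name, then the command data. A small encoder built only from the values visible in the tests (encode_command is my own helper, not part of azmq):

import struct

def encode_command(name: bytes, data: bytes) -> bytes:
    """Build a command frame: flag, size, name-size, name, command data."""
    body = bytes([len(name)]) + name + data
    if len(body) < 256:
        return b"\x04" + bytes([len(body)]) + body        # short command, 1-byte size
    return b"\x06" + struct.pack(">Q", len(body)) + body  # long command, 8-byte size

assert encode_command(b"FOO", b"hello") == b"\x04\x09\x03FOOhello"
# As test_expect_command_large shows, the reader also accepts the 8-byte size form
# (flag 0x06) even for small payloads; the sketch above only emits it when needed.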
LaurentClaessens/phystricks
testing/demonstration/phystricksMBWHooeesXIrsz.py
Python
gpl-3.0
563
0.039146
# -*- coding: utf8 -*- from ph
ystricks import * def MBWHooeesXIrsz(): pspict,fig = SinglePicture("MBWHooeesXIrsz") pspict.dilatation(0.3) l=4 A=Point(0,0) B=Point(l,0) C=Point(l,l) trig=Polygon(A,B,C) trig.put_mark(0.2,pspict=pspict) trig.edges[0].put_code(n=2,d=0.1,l=0.2,pspict=pspict) trig.edges[1].put_code(n=2,d=0.1,l=0.2,pspict=pspict) no_symbol(trig.vertices) pspict.DrawGraphs(trig) pspict.comment="Check
 the length of the tick codes." fig.no_figure() fig.conclude() fig.write_the_file()
manxueitp/cozmo-test
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
Python
mit
6,646
0.003624
#!/usr/bin/env python3 # Copyright (c) 2016 Anki, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the file LICENSE.txt or at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''"If This Then That" Gmail example This example demonstrates how "If This Then That" (http://ifttt.com) can be used make Cozmo respond when a Gmail account receives an email. Instructions below will lead you through setting up an applet on the IFTTT website. When the applet trigger is called (which sends a web request received by the web server started in this example), Cozmo will play an animation, speak the email sender's name and show a mailbox image on his face. Please place Cozmo on the charger for this example. When necessary, he will be rolled off and back on. Follow these steps to set up and run the example: 1) Provide a a static ip, URL or similar that can be reached from the If This Then That server. One easy way to do this is with ngrok, which sets up a secure tunnel to localhost running on your machine. To set up ngrok: a) Follow instructions here to download and install: https://ngrok.com/download b) Run this command to create a secure public URL for port 8080: ./ngrok http 8080 c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io). You will use this address in your applet, below. WARNING: Using ngrok exposes your local web server to the internet. See the ngrok documentation for more information: https://ngrok.com/docs 2) Set up your applet on the "If This Then That" website. a) Sign up and sign into https://ifttt.com b) Create an applet: https://ifttt.com/create c) Set up your trigger. 1. Click "this". 2. Select "Gmail" as your service. If prompted, click "Connect", select your Gmail account, and click “Allow” to provide permissions to IFTTT for your email account. Click "Done". 3. Under "Choose a Trigger", select “Any new email in inbox". d) Set up your action. 1. Click “that". 2. Select “Maker" to set it as your action channel. Connect to the Maker channel if prompted. 3. Click “Make a web request" and fill out the fields as follows. Remember your publicly accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field, followed by "/iftttGmail" as shown below: URL: http://55e57164.ngrok.io/iftttGmail Method: POST Content Type: application/json Body: {"FromAddress":"{{FromAddress}}"} 5. Click “Create Action" then “Finish". 3) Test your applet. a) Run this script at the command line: ./ifttt_gmail.py b) On ifttt.com, on your applet page, click “Check now”. See that IFTTT confirms that the applet was checked. c) Send an email to the Gmail account in your recipe d) On your IFTTT applet webpage, again click “Check now”. This should cause IFTTT to detect that the email was received and send a web request to the ifttt_gmail.py script. e) In response to the ifttt web request, Cozmo should roll off the charger, raise and lower his lift, announce the email, and then show a mailbox image on his face. 
''' import asyncio import re import sys try: from aiohttp import web except ImportError: sys.exit("Cannot import from aiohttp. Do `pip3 install --user aioht
tp` to install") import cozmo from common impo
rt IFTTTRobot app = web.Application() async def serve_gmail(request): '''Define an HTTP POST handler for receiving requests from If This Then That. You may modify this method to change how Cozmo reacts to the email being received. ''' json_object = await request.json() # Extract the name of the email sender. from_email_address = json_object["FromAddress"] # Use a regular expression to break apart pieces of the email address match_object = re.search(r'([\w.]+)@([\w.]+)', from_email_address) email_local_part = match_object.group(1) robot = request.app['robot'] async def read_name(): try: async with robot.perform_off_charger(): '''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face.''' await robot.get_in_position() # First, have Cozmo play animation "ID_pokedB", which tells # Cozmo to raise and lower his lift. To change the animation, # you may replace "ID_pokedB" with another animation. Run # remote_control_cozmo.py to see a list of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have Cozmo speak the name of the email sender. await robot.say_text("Email from " + email_local_part).wait_for_completed() # Last, have Cozmo display an email image on his face. robot.display_image_file_on_face("../face_images/ifttt_gmail.png") except cozmo.RobotBusy: cozmo.logger.warning("Robot was busy so didn't read email address: "+ from_email_address) # Perform Cozmo's task in the background so the HTTP server responds immediately. asyncio.ensure_future(read_name()) return web.Response(text="OK") # Attach the function as an HTTP handler. app.router.add_post('/iftttGmail', serve_gmail) if __name__ == '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False # Use our custom robot class with extra helper methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try: sdk_conn = cozmo.connect_on_loop(app.loop) # Wait for the robot to become available and add it to the app object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as e: sys.exit("A connection error occurred: %s" % e) web.run_app(app)
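To exercise the handler above without involving IFTTT, one can post the same JSON shape locally. This assumes the aiohttp server from the example is already running on its default port 8080; the sender address is made up:

import json
import urllib.request

payload = json.dumps({"FromAddress": "jane.doe@example.com"}).encode("utf-8")
req = urllib.request.Request(
    "http://localhost:8080/iftttGmail",
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read().decode())   # expect: 200 OK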