| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
| hanlind/nova | nova/tests/unit/volume/encryptors/test_luks.py | Python | apache-2.0 | 10,988 | 0.000091 |
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
from castellan.common.objects import symmetric_key as key
import mock
from oslo_concurrency import processutils
import uuid
from nova.tests.unit.volume.encryptors import test_cryptsetup
from nova.volume.encryptors import luks
class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
def _create(self, connection_info):
return luks.LuksEncryptor(connection_info)
@mock.patch('nova.utils.execute')
def test_is_luks(self, mock_execute):
luks.is_luks(self.dev_path)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(1, mock_execute.call_count)
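# For reference, a minimal sketch of the helper under test (the real
# implementation lives in nova/volume/encryptors/luks.py; this mirrors the
# calls asserted above, but the details are assumptions, not the verbatim
# nova code):
#
#     def is_luks(device):
#         try:
#             utils.execute('cryptsetup', 'isLuks', '--verbose', device,
#                           run_as_root=True, check_exit_code=True)
#             return True
#         except processutils.ProcessExecutionError as e:
#             LOG.warning("isLuks exited abnormally (status %(exit_code)s): "
#                         "%(stderr)s", {'exit_code': e.exit_code,
#                                        'stderr': e.stderr})
#             return False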
@mock.patch('nova.volume.encryptors.luks.LOG')
@mock.patch('nova.utils.execute')
def test_is_luks_with_error(self, mock_execute, mock_log):
error_msg = "Device %s is not a valid LUKS device." % self.dev_path
mock_execute.side_effect = \
processutils.ProcessExecutionError(exit_code=1,
stderr=error_msg)
luks.is_luks(self.dev_path)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
self.assertEqual(1, mock_log.warning.call_count) # warning logged
@mock.patch('nova.utils.execute')
def test__format_volume(self, mock_execute):
self.encryptor._format_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', '--batch-mode', 'luksFormat',
'--key-file=-', self.dev_path,
process_input='passphrase',
run_as_root=True, check_exit_code=True, attempts=3),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__open_volume(self, mock_execute):
self.encryptor._open_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input='passphrase',
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
None, fake_key)
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume_not_formatted(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
None, fake_key)
mock_execute.side_effect = [
processutils.ProcessExecutionError(exit_code=1), # luksOpen
processutils.ProcessExecutionError(exit_code=1), # isLuks
mock.DEFAULT, # luksFormat
mock.DEFAULT, # luksOpen
mock.DEFAULT, # ln
]
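# (mock.DEFAULT entries in a side_effect list mean "return the mock's
# normal return value instead of raising", so only the first two calls
# fail here.)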
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', '--batch-mode', 'luksFormat',
'--key-file=-', self.dev_path, process_input=fake_key,
run_as_root=True, check_exit_code=True, attempts=3),
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(5, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume_fail(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
None, fake_key)
mock_execute.side_effect = [
processutils.ProcessExecutionError(exit_code=1), # luksOpen
mock.DEFAULT, # isLuks
]
self.assertRaises(processutils.ProcessExecutionError,
self.encryptor.attach_volume, None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__close_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksClose', self.dev_name,
attempts=3, run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_detach_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksClose', self.dev_name,
attempts=3, run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
def test_get_mangled_passphrase(self):
# Confirm that a mangled passphrase is provided as per bug#1633518
unmangled_raw_key = bytes(binascii.unhexlify('0725230b'))
symmetric_key = key.SymmetricKey('AES', len(unmangled_raw_key) * 8,
unmangled_raw_key)
unmangled_encoded_key = symmetric_key.get_encoded()
encryptor = luks.LuksEncryptor(self.connection_info)
self.assertEqual(encryptor._get_mangled_passphrase(
| kimegitee/python-koans | python3/koans/about_with_statements.py | Python | mit | 3,602 | 0.004997 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutSandwichCode in the Ruby Koans
#
from runner.koan import *
import re # For regular expression string comparisons
class AboutWithStatements(Koan):
def count_lines(self, file_name):
try:
file = open(file_name)
try:
return len(file.readlines())
finally:
file.close()
except IOError:
# should never happen
self.fail()
def test_counting_lines(self):
self.assertEqual(4, self.count_lines("example_file.txt"))
# ------------------------------------------------------------------
def find_line(self, file_name):
try:
file = open(file_name)
try:
for line in file.readlines():
match = re.search('e', line)
if match:
return line
finally:
file.close()
except IOError:
# should never happen
self.fail()
def test_finding_lines(self):
self.assertEqual('test\n', self.find_line("example_file.txt"))
## ------------------------------------------------------------------
## THINK ABOUT IT:
##
## The count_lines and find_line are similar, and yet different.
## They both follow the pattern of "sandwich code".
##
## Sandwich code is code that comes in three parts: (1) the top slice
## of bread, (2) the meat, and (3) the bottom slice of bread.
## The bread part of the sandwich almost always goes together, but
## the meat part changes all the time.
##
## Because the changing part of the sandwich code is in the middle,
## abstracting the top and bottom bread slices to a library can be
## difficult in many languages.
##
## (Aside for C++ programmers: The idiom of capturing allocated
## pointers in a smart pointer constructor is an attempt to deal with
## the problem of sandwich code for resource allocation.)
##
## Python solves the problem using Context Managers. Consider the
## following code:
##
class FileContextManager():
def __init__(self, file_name):
self._file_name = file_name
self._file = None
def __enter__(self):
self._file = open(self._file_name)
return self._file
def __exit__(self, cls, value, tb):
self._file.close()
# Now we write:
def count_lines2(self, file_name):
with self.FileContextManager(file_name) as file:
return len(file.readlines())
def test_counting_lines2(self):
self.assertEqual(4, self.count_lines2("example_file.txt"))
# ------------------------------------------------------------------
def find_line2(self, file_name):
# Rewrite find_line using the Context Manager.
with self.FileContextManager(file_name) as file:
for line in file.readlines():
if re.search('e', line):
return line
def test_finding_lines2(self):
self.assertEqual('test\n', self.find_line2("example_file.txt"))
self.assertNotEqual('a', self.find_line2("example_file.txt"))
# ------------------------------------------------------------------
def count_lines3(self, file_name):
with open(file_name) as file:
return len(file.readlines())
def test_open_already_has_its_own_built_in_context_manager(self):
self.assertEqual(4, self.count_lines3("example_file.txt"))
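# ------------------------------------------------------------------
# Aside (not part of the original koan; a minimal sketch): the standard
# library expresses the same sandwich with contextlib.contextmanager:
# code before the yield is the top slice, the finally block the bottom.
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def managed_file(file_name):
#         file = open(file_name)
#         try:
#             yield file
#         finally:
#             file.close()
#
#     # with managed_file("example_file.txt") as file:
#     #     len(file.readlines())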
| alan-wu/neon | src/opencmiss/neon/ui/editors/spectrumeditorwidget.py | Python | apache-2.0 | 26,338 | 0.002202 |
'''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from PySide import QtCore, QtGui
from opencmiss.zinc.sceneviewer import Sceneviewer
from opencmiss.zinc.spectrum import Spectrum, Spectrumcomponent
from opencmiss.zinc.status import OK as ZINC_OK
from opencmiss.neon.ui.editors.ui_spectrumeditorwidget import Ui_SpectrumEditorWidget
from opencmiss.neon.settings.mainsettings import FLOAT_STRING_FORMAT
from opencmiss.neon.core.neonlogger import NeonLogger
COMPONENT_NAME_FORMAT = '{:d}. '
SPECTRUM_DATA_ROLE = QtCore.Qt.UserRole + 1
class SpectrumEditorWidget(QtGui.QWidget):
def __init__(self, parent=None, shared_context=None):
super(SpectrumEditorWidget, self).__init__(parent)
self._ui = Ui_SpectrumEditorWidget()
self._ui.setupUi(self, shared_context)
self._ui.comboBoxColourMap.addItems(extractColourMappingEnum())
self._ui.comboBoxScale.addItems(extractScaleTypeEnum())
self._spectrums = None
self._zincContext = None
self._selected_spectrum_row = -1
self._selected_spectrum_components_row = -1
self._previewZincRegion = None
self._previewZincScene = None
self._spectrummodulenotifier = None
self._currentSpectrumName = None
self._updateUi()
self._makeConnections()
def _makeConnections(self):
self._ui.pushButtonAddSpectrum.clicked.connect(self._addSpectrumClicked)
self._ui.pushButtonDeleteSpectrum.clicked.connect(self._deleteSpectrumClicked)
self._ui.pushButtonAddSpectrumComponent.clicked.connect(self._addSpectrumComponentClicked)
self._ui.pushButtonDeleteSpectrumComponent.clicked.connect(self._deleteSpectrumComponentClicked)
self._ui.listWidgetSpectrums.itemClicked.connect(self._spectrumItemClicked)
self._ui.listWidgetSpectrums.itemChanged.connect(self._spectrumChanged)
self._ui.checkBoxOverwrite.clicked.connect(self._overwriteClicked)
self._ui.checkBoxDefault.clicked.connect(self._defaultClicked)
self._ui.pushButtonAutorange.clicked.connect(self._autorangeClicked)
self._ui.listWidgetSpectrumComponents.itemClicked.connect(self._spectrumComponentItemClicked)
self._ui.pushButtonMoveDownSpectrumComponent.clicked.connect(self._moveDownSpectrumComponentClicked)
self._ui.pushButtonMoveUpSpectrumComponent.clicked.connect(self._moveUpSpectrumComponentClicked)
self._ui.comboBoxColourMap.currentIndexChanged.connect(self._colourMapIndexChanged)
self._ui.pushButtonReverseColours.clicked.connect(self._reverseColoursClicked)
self._ui.spinBoxDataFieldComponent.valueChanged.connect(self._dataFieldComponentValueChanged)
self._ui.lineEditDataRangeMin.editingFinished.connect(self._dataRangeMinEntered)
self._ui.lineEditDataRangeMax.editingFinished.connect(self._dataRangeMaxEntered)
self._ui.lineEditColourRangeMin.editingFinished.connect(self._colourRangeMinEntered)
self._ui.lineEditColourRangeMax.editingFinished.connect(self._colourRangeMaxEntered)
self._ui.checkBoxExtendBelow.clicked.connect(self._extendBelowClicked)
self._ui.checkBoxExtendAbove.clicked.connect(self._extendAboveClicked)
self._ui.checkBoxFixMinimum.clicked.connect(self._fixMinimumClicked)
self._ui.checkBoxFixMaximum.clicked.connect(self._fixMaximumClicked)
self._ui.comboBoxScale.currentIndexChanged.connect(self._scaleIndexChanged)
self._ui.lineEditExaggeration.editingFinished.connect(self._exaggerationEntered)
self._ui.sceneviewerWidgetPreview.graphicsInitialized.connect(self._graphicsInitialised)
def _getCurrentSpectrum(self):
currentItem = self._ui.listWidgetSpectrums.currentItem()
if not currentItem:
return None
name = currentItem.text()
sm = self._zincContext.getSpectrummodule()
spectrum = sm.findSpectrumByName(name)
if spectrum.isValid():
return spectrum
return None
def _clearSpectrumUi(self):
self._ui.listWidgetSpectrumComponents.clear()
self._ui.checkBoxOverwrite.setChecked(False)
self._clearSpectrumComponentUi()
def _clearSpectrumComponentUi(self):
self._ui.comboBoxColourMap.setCurrentIndex(0)
self._ui.comboBoxScale.setCurrentIndex(0)
def _spectrummoduleCallback(self, spectrummoduleevent):
'''
Callback for change in spectrums; may need to rebuild spectrum list
'''
changeSummary = spectrummoduleevent.getSummarySpectrumChangeFlags()
# print("Spectrum Editor: _spectrummoduleCallback changeSummary " + str(changeSummary))
if 0 != (changeSummary & (Spectrum.CHANGE_FLAG_IDENTIFIER | Spectrum.CHANGE_FLAG_ADD | Spectrum.CHANGE_FLAG_REMOVE)):
self._buildSpectrumList()
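# (Only identifier changes, additions and removals can alter the list
# contents; other spectrum edits leave the list as-is, so no rebuild is
# needed for them.)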
def _buildSpectrumList(self):
sm = self._zincContext.getSpectrummodule()
si = sm.createSpectrumiterator()
lws = self._ui.listWidgetSpectrums
lws.clear()
s = si.next()
selectedItem = None
while s.isValid():
name = s.getName()
item = createSpectrumListItem(name)
lws.addItem(item)
if name == self._currentSpectrumName:
selectedItem = item
s = si.next()
if not selectedItem:
if lws.count() > 0:
selectedItem = lws.item(0)
self._currentSpectrumName = selectedItem.text()
else:
self._currentSpectrumName = None
if selectedItem:
lws.setCurrentItem(selectedItem)
selectedItem.setSelected(True)
self._updateUi()
def _updateUi(self):
self._clearSpectrumUi()
spectrum = self._getCurrentSpectrum()
spectrum_selected = spectrum is not None
self._ui.pushButtonDeleteSpectrum.setEnabled(spectrum_selected)
self._ui.sceneviewerWidgetPreview.setEnabled(spectrum_selected)
self._ui.groupBoxSpectrumProperties.setEnabled(spectrum_selected)
self._ui.groupBoxComponents.setEnabled(spectrum_selected)
self._ui.groupBoxComponentProperties.setEnabled(spectrum_selected)
if spectrum_selected:
# Only one spectrum can be selected at a time.
sm = self._zincContext.getSpectrummodule()
is_default_spectrum = (spectrum == sm.getDefaultSpectrum())
self._ui.pushButtonDeleteSpectrum.setEnabled(not is_default_spectrum)
self._ui.checkBoxDefault.setChecked(is_default_spectrum)
self._ui.checkBoxOverwrite.setChecked(spectrum.isMaterialOverwrite())
sc = spectrum.getFirstSpectrumcomponent()
while sc.isValid():
count = self._ui.listWidgetSpectrumComponents.count() + 1
self._ui.listWidgetSpectrumComponents.addItem(createItem(getComponentString(sc, count), sc))
sc = spectrum.getNextSpectrumcomponent(sc)
if self._ui.listWidgetSpectrumComponents.count():
self._ui.listWidgetSpectrumComponents.setCurrentRow(0)
self._previewSpectrum(spectrum)
self._updateComponentUi()
def _previewSpectrum(self, spectrum):
if self._previewZincScene is None:
return
if (spectrum is None) or (not spectrum.isValid()):
self._previewZincScene.removeAllGraphics()
return
points = self._previewZincScene.getFirstGraphics()
self._previewZincScene.beginChange()
if not points.isValid():
points = self._previewZincScene.createGraphics
| whiterabbitengine/fifeplusplus | tools/editor/scripts/mapcontroller.py | Python | lgpl-2.1 | 18,203 | 0.02983 |
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
import editor
import pdb
import math
from fife import fife
import editor
import events
import undomanager
from fife.extensions.pychan.tools import callbackWithArguments as cbwa
from fife.extensions.fife_settings import Setting
TDS = Setting(app_name="editor")
class MapController(object):
""" MapController provides an interface for editing maps """
def __init__(self, map):
self._editor = editor.getEditor()
self._engine = self._editor.getEngine()
self._camera = None # currently selected camera
self._layer = None # currently selected layer
self._selection = [] # currently selected cells
self._single_instance = False # flag to force selection of one single instance
self._instance = None # current selected single instance
self._map = None
self._undo = False
self._undomanager = undomanager.UndoManager()
undomanager.preUndo.connect(self._startUndo, sender=self._undomanager)
undomanager.preRedo.connect(self._startUndo, sender=self._undomanager)
undomanager.postUndo.connect(self._endUndo, sender=self._undomanager)
undomanager.postRedo.connect(self._endUndo, sender=self._undomanager)
self.debug = False
self._settings = TDS
self.overwriteInstances = True # Remove instances on cell before placing new instance
self.importList = {} # used to keep track of current imports needed by the map
if map is not None:
self.setMap(map.getId())
def cleanUp(self):
undomanager.preUndo.disconnect(self._startUndo, sender=self._undomanager)
undomanager.preRedo.disconnect(self._startUndo, sender=self._undomanager)
undomanager.postUndo.disconnect(self._endUndo, sender=self._undomanager)
undomanager.postRedo.disconnect(self._endUndo, sender=self._undomanager)
self._undomanager.clear()
self._editor = None
self._engine = None
self._map = None
self._selection = []
self._layer = None
self._camera = None
def setMap(self, mapid):
""" Set the map to be edited """
self._camera = None
self._map = None
self._layer = None
self._selection = []
self._map = self._engine.getModel().getMap(mapid)
if not self._map.getLayers():
raise AttributeError('Editor error: map ' + self._map.getId() + ' has no layers. Cannot edit.')
for cam in self._map.getCameras():
if cam.getLocationRef().getMap().getId() == self._map.getId():
self._camera = cam
break
# remove imports from previous map
self.importList.clear()
# keep track of imports that were loaded with the map
for layer in self._map.getLayers():
for i in layer.getInstances():
self.incrementReferenceCountForObject(i.getObject())
self._layer = self._map.getLayers()[0]
gridrenderer = fife.GridRenderer.getInstance(self._camera)
gridrenderer.activateAllLayers(self._map)
color = str(self._settings.get("Colors", "Grid", "0,255,0"))
gridrenderer.setColor(*[int(c) for c in color.split(',')])
blockrenderer = fife.BlockingInfoRenderer.getInstance(self._camera)
blockrenderer.activateAllLayers(self._map)
color = str(self._settings.get("Colors", "Blocking", "0,255,0"))
blockrenderer.setColor(*[int(c) for c in color.split(',')])
coordinaterenderer = fife.CoordinateRenderer.getInstance(self._camera)
coordinaterenderer.activateAllLayers(self._map)
color = str(self._settings.get("Colors", "Coordinate", "255,255,255"))
coordinaterenderer.setColor(*[int(c) for c in color.split(',')])
cellrenderer = fife.CellSelectionRenderer.getInstance(self._camera)
cellrenderer.activateAllLayers(self._map)
color = str(self._settings.get("Colors", "CellSelection", "255,0,0"))
cellrenderer.setColor(*[int(c) for c in color.split(',')])
cellrenderer.setEnabled(True)
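# (The four renderer blocks above share one pattern: fetch the camera's
# renderer singleton, activate it on every map layer, then apply an
# "r,g,b" color string taken from the editor settings.)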
def getMap(self):
return self._map
def selectLayer(self, layerid):
""" Select layer to be edited """
self.deselectSelection()
self._layer = None
layers = [l for l in self._map.getLayers() if l.getId() == layerid]
if len(layers) == 1:
self._layer = layers[0]
def deselectSelection(self):
""" Deselects all selected cells """
if not self._camera:
if self.debug: print 'No camera bind yet, cannot select any cell'
return
self._selection = []
fife.CellSelectionRenderer.getInstance(self._camera).reset()
def clearSelection(self):
""" Removes all instances on selected cells """
instances = self.getInstancesFromSelection()
self._undomanager.startGroup("Cleared selection")
self.removeInstances(instances)
self._undomanager.endGroup()
def fillSelection(self, object):
""" Adds an instance of object on each selected cell """
self._undomanager.startGroup("Filled selection")
for loc in self._selection:
self.placeInstance(loc.getLayerCoordinates(), object)
self._undomanager.endGroup()
def selectCell(self, screenx, screeny):
""" Selects a cell at a position on screen """
if not self._camera:
if self.debug: print 'No camera bind yet, cannot select any cell'
return
if not self._layer:
if self.debug: print 'No layer assigned in selectCell'
return
loc = fife.Location(self._layer)
loc.setLayerCoordinates(self._layer.getCellGrid().toLayerCoordinates(self.screenToMapCoordinates(screenx, screeny)))
for i in self._selection:
if loc.getLayerCoordinates() == i.getLayerCoordinates(): return
self._selection.append(loc)
fife.CellSelectionRenderer.getInstance(self._camera).selectLocation(loc)
def deselectCell(self, screenx, screeny):
""" Deselects a cell at a position on screen """
if not self._camera:
if self.debug: print 'No camera bind yet, cannot select any cell'
return
if not self._layer:
if self.debug: print 'No layer assigned in selectCell'
return
loc = fife.Location(self._layer)
loc.setLayerCoordinates(self._layer.getCellGrid().toLayerCoordinates(self.screenToMapCoordinates(screenx, screeny)))
for i in self._selection[:]:
if loc.getLayerCoordinates() == i.getLayerCoordinates():
self._selection.remove(i)
fife.CellSelectionRenderer.getInstance(self._camera).deselectLocation(i)
return
def getLocationsFromSelection(self):
""" Returns all locations in the selected cells """
return self._selection
def getInstance(self):
""" Returns a single instance packed into a list (compat to API) """
if self._instance:
return [self._instance, ]
return []
def getInstancesFromSelection(self):
""" Returns all instances in the selected cells """
instances = []
for loc in self._selection:
instances.extend(self.getInstancesFromPosition(loc.getExactLayerCoordinates(), loc.getLayer()))
return instances
def getInstancesFromPosition(self, position, layer=None):
""" Returns all instances on a specified position
Interprets ivar _single_instance and returns only the
first instance if flag is set to True
"""
if not self._layer and not layer:
if self.debug: print 'No layer assigned in getInstancesFromPosition'
return
if not position:
if self.debug:
| izacus/slo_pos | tests.py | Python | lgpl-2.1 | 987 | 0.007136 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import slopos
class TestTagger(unittest.TestCase):
def setUp(self):
slopos.load_from_path("slopos/sl-tagger.pickle")
def testSentenceTagging(self):
tagged = slopos.tag("To je test.")
self.assertEqual(tagged, [('To', 'ZK-SEI'), ('je', 'GP-STE-N'), ('test', 'SOMETN'), ('.', '-None-')])
def testListTagging(self):
tagged = slopos.tag(["To", "je", "test"])
self.assertEqual(tagged, [('To', 'ZK-SEI'), ('je', 'GP-STE-N'), ('test', 'SOMETN')])
def testUnicodeSentenceTagging(self):
tagged = slopos.tag("V kožuščku zelene lisice stopiclja jezen otrok.")
self.assertEqual(tagged, [('V', 'DM'), ('kožuščku', 'SOMEM'), ('zelene', 'PPNZER'),
('lisice', 'SOZER,'),
('stopiclja', 'GGNSTE'), ('jezen', 'PPNMEIN'), ('otrok', 'SOMEI.'), ('.', '-None-')])
if __name__ == "__main__":
unittest.main()
| stackforge/tricircle | tricircle/tests/unit/network/test_central_trunk_plugin.py | Python | apache-2.0 | 24,196 | 0 |
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
import six
import unittest
from six.moves import xrange
import neutron.conf.common as q_config
from neutron.db import db_base_plugin_v2
from neutron_lib.api.definitions import portbindings
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils
from oslo_config import cfg
from oslo_utils import uuidutils
from tricircle.common import client
from tricircle.common import constants
from tricircle.common import context
import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models
from tricircle.network import central_plugin
import tricircle.network.central_trunk_plugin as trunk_plugin
from tricircle.network import helper
import tricircle.tests.unit.utils as test_utils
from tricircle.xjob import xmanager
_resource_store = test_utils.get_resource_store()
TOP_TRUNKS = _resource_store.TOP_TRUNKS
TOP_SUBPORTS = _resource_store.TOP_SUBPORTS
TOP_PORTS = _resource_store.TOP_PORTS
BOTTOM1_TRUNKS = _resource_store.BOTTOM1_TRUNKS
BOTTOM2_TRUNKS = _resource_store.BOTTOM2_TRUNKS
BOTTOM1_SUBPORTS = _resource_store.BOTTOM1_SUBPORTS
BOTTOM2_SUBPORTS = _resource_store.BOTTOM2_SUBPORTS
BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
BOTTOM2_PORTS = _resource_store.BOTTOM2_PORTS
TEST_TENANT_ID = test_utils.TEST_TENANT_ID
class FakeBaseXManager(xmanager.XManager):
def __init__(self):
self.clients = {constants.TOP: client.Client()}
def _get_client(self, region_name=None):
return FakeClient(region_name)
class FakeXManager(FakeBaseXManager):
def __init__(self, fake_plugin):
super(FakeXManager, self).__init__()
self.xjob_handler = FakeBaseRPCAPI(fake_plugin)
self.helper = helper.NetworkHelper()
class FakeBaseRPCAPI(object):
def __init__(self, fake_plugin):
self.xmanager = FakeBaseXManager()
def sync_trunk(self, ctxt, project_id, trunk_id, pod_id):
combine_id = '%s#%s' % (pod_id, trunk_id)
self.xmanager.sync_trunk(
ctxt, payload={constants.JT_TRUNK_SYNC: combine_id})
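# (The pod and trunk ids are packed into a single "pod_id#trunk_id"
# string and passed as the payload value under the JT_TRUNK_SYNC key.)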
def configure_security_group_rules(self, ctxt, project_id):
pass
class FakeRPCAPI(FakeBaseRPCAPI):
def __init__(self, fake_plugin):
self.xmanager = FakeXManager(fake_plugin)
class FakeNeutronClient(test_utils.FakeNeutronClient):
_resource = 'trunk'
trunks_path = ''
class FakeClient(test_utils.FakeClient):
def __init__(self, region_name=None):
super(FakeClient, self).__init__(region_name)
self.client = FakeNeutronClient(self.region_name)
def get_native_client(self, resource, ctx):
return self.client
def get_trunks(self, ctx, trunk_id):
return self.get_resource(constants.RT_TRUNK, ctx, trunk_id)
def update_trunks(self, context, trunk_id, trunk):
self.update_resources(constants.RT_TRUNK, context, trunk_id, trunk)
def delete_trunks(self, context, trunk_id):
self.delete_resources(constants.RT_TRUNK, context, trunk_id)
def action_trunks(self, ctx, action, resource_id, body):
if self.region_name == 'pod_1':
btm_trunks = BOTTOM1_TRUNKS
else:
btm_trunks = BOTTOM2_TRUNKS
for trunk in btm_trunks:
if trunk['id'] == resource_id:
subports = body['sub_ports']
if action == 'add_subports':
for subport in subports:
subport['trunk_id'] = resource_id
trunk['sub_ports'].extend(subports)
return
elif action == 'remove_subports':
for subport in subports:
for b_subport in trunk['sub_ports']:
if subport['port_id'] == b_subport['port_id']:
trunk['sub_ports'].remove(b_subport)
return
def list_trunks(self, ctx, filters=None):
filter_dict = {}
filters = filters or []
for query_filter in filters:
key = query_filter['key']
# when querying trunks, "fields" is passed in the query string to
# ask the server to only return necessary fields, which can reduce
# the data being transferred. In test, we just return all the
# fields since there's no need to optimize
if key != 'fields':
value = query_filter['value']
filter_dict[key] = value
return self.client.get('', filter_dict)['trunks']
def get_ports(self, ctx, port_id):
pass
def list_ports(self, ctx, filters=None):
fake_plugin = FakePlugin()
q_ctx = FakeNeutronContext()
_filters = {}
for f in filters:
_filters[f['key']] = [f['value']]
return fake_plugin.get_trunk_subports(q_ctx, _filters)
def create_ports(self, ctx, body):
if 'ports' in body:
ret = []
for port in body['ports']:
p = self.create_resources('port', ctx, {'port': port})
p['id'] = p['device_id']
ret.append(p)
return ret
return self.create_resources('port', ctx, body)
class FakeNeutronContext(test_utils.FakeNeutronContext):
def session_class(self):
return FakeSession
class FakeSession(test_utils.FakeSession):
def add_hook(self, model_obj, model_dict):
if model_obj.__tablename__ == 'subports':
for top_trunk in TOP_TRUNKS:
if top_trunk['id'] == model_dict['trunk_id']:
top_trunk['sub_ports'].append(model_dict)
def delete_top_subport(self, port_id):
for res_list in self.resource_store.store_map.values():
for res in res_list:
sub_ports = res.get('sub_ports')
if sub_ports:
for sub_port in sub_ports:
if sub_port['port_id'] == port_id:
sub_ports.remove(sub_port)
def delete_hook(self, model_obj):
if model_obj.get('segmentation_type'):
self.delete_top_subport(model_obj['port_id'])
return 'port_id'
class FakePlugin(trunk_plugin.TricircleTrunkPlugin):
def __init__(self):
self._segmentation_types = {'vlan': utils.is_valid_vlan_tag}
self.xjob_handler = FakeRPCAPI(self)
self.helper = helper.NetworkHelper(self)
def _get_client(self, region_name):
return FakeClient(region_name)
def fake_get_context_from_neutron_context(q_context):
ctx = context.get_db_context()
return ctx
def fake_get_min_search_step(self):
return 2
class FakeCorePlugin(central_plugin.TricirclePlugin):
def __init__(self):
self.type_manager = test_utils.FakeTypeManager()
def get_port(self, context, port_id):
return {portbindings.HOST_ID: None,
'device_id': None}
def get_ports(self, ctx, filters):
top_client = FakeClient()
_filters = []
for key, values in six.iteritems(filters):
for v in values:
_filters.append({'key': key, 'comparator': 'eq', 'value': v})
return top_client.list_resources('port', ctx, _filters)
def update_port(self, context, id, port):
port_body = port['port']
for _port in TOP_PORTS:
if _port['id'] == id:
for key, value in six.iteritems(port_body):
_port[key] = value
class PluginTest(unittest.TestCase):
def setUp(self):
| yeyanchao/calibre | src/calibre/ebooks/txt/processor.py | Python | gpl-3.0 | 8,709 | 0.006889 |
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Read content from txt file.
'''
import os, re
from calibre import prepare_string_for_xml, isbytestring
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.conversion.preprocess import DocAnalysis
from calibre.utils.cleantext import clean_ascii_chars
HTML_TEMPLATE = u'<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/><title>%s </title></head><body>\n%s\n</body></html>'
def clean_txt(txt):
'''
Run transformations on the text to put it into
consistent state.
'''
if isbytestring(txt):
txt = txt.decode('utf-8', 'replace')
# Strip whitespace from the end of the line. Also replace
# all line breaks with \n.
txt = '\n'.join([line.rstrip() for line in txt.splitlines()])
# Replace whitespace at the beginning of the line with four spaces.
txt = re.sub('(?m)(?<=^)([ ]{2,}|\t+)(?=.)', ' ' * 4, txt)
# Condense redundant spaces
txt = re.sub('[ ]{2,}', ' ', txt)
# Remove blank space from the beginning and end of the document.
txt = re.sub('^\s+(?=.)', '', txt)
txt = re.sub('(?<=.)\s+$', '', txt)
# Remove excessive line breaks.
txt = re.sub('\n{5,}', '\n\n\n\n', txt)
#remove ASCII invalid chars : 0 to 8 and 11-14 to 24
txt = clean_ascii_chars(txt)
return txt
def split_txt(txt, epub_split_size_kb=0):
'''
Ensure there are split points for converting
to EPUB. A misdetected paragraph type can
result in the entire document being one giant
paragraph. In this case the EPUB parser will not
be able to determine where to split the file
to accommodate the EPUB file size limitation
and will fail.
'''
# No splitting is needed when no size limit is given
if epub_split_size_kb > 0:
if isinstance(txt, unicode):
txt = txt.encode('utf-8')
length_byte = len(txt)
# Calculate an average chunk size for EPUB splitting (+2 chunks as a safety margin)
chunk_size = long(length_byte / (int(length_byte / (epub_split_size_kb * 1024) ) + 2 ))
# if any chunk exceeds the target size, insert breaks at sentence boundaries
if (len(filter(lambda x: len(x) > chunk_size, txt.split('\n\n')))) :
txt = '\n\n'.join([split_string_separator(line, chunk_size)
for line in txt.split('\n\n')])
if isbytestring(txt):
txt = txt.decode('utf-8')
return txt
def convert_basic(txt, title='', epub_split_size_kb=0):
'''
Converts plain text to html by putting all paragraphs in
<p> tags. It condenses and retains blank lines when necessary.
Requires paragraphs to be in single line format.
'''
txt = clean_txt(txt)
txt = split_txt(txt, epub_split_size_kb)
lines = []
blank_count = 0
# Split into paragraphs based on having a blank line between text.
for line in txt.split('\n'):
if line.strip():
blank_count = 0
lines.append(u'<p>%s</p>' % prepare_string_for_xml(line.replace('\n', ' ')))
else:
blank_count += 1
if blank_count == 2:
lines.append(u'<p> </p>')
return HTML_TEMPLATE % (title, u'\n'.join(lines))
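# Illustrative call (hypothetical strings, following the logic above):
#     convert_basic('First para.\n\nSecond para.')
# yields an HTML page whose body contains:
#     <p>First para.</p>
#     <p>Second para.</p>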
def convert_markdown(txt, title='', disable_toc=False):
from calibre.ebooks.markdown import markdown
extensions=['footnotes', 'tables']
if not disable_toc:
extensions.append('toc')
md = markdown.Markdown(
extensions,
safe_mode=False)
return HTML_TEMPLATE % (title, md.convert(txt))
def convert_textile(txt, title=''):
from calibre.ebooks.textile import textile
html = textile(txt, encoding='utf-8')
return HTML_TEMPLATE % (title, html)
def normalize_line_endings(txt):
txt = txt.replace('\r\n', '\n')
txt = txt.replace('\r', '\n')
return txt
def separate_paragraphs_single_line(txt):
txt = txt.replace('\n', '\n\n')
return txt
def separate_paragraphs_print_formatted(txt):
txt = re.sub(u'(?miu)^(?P<indent>\t+|[ ]{2,})(?=.)', lambda mo: '\n%s' % mo.group('indent'), txt)
return txt
def separate_hard_scene_breaks(txt):
def sep_break(line):
if len(line.strip()) > 0:
return '\n%s\n' % line
else:
return line
txt = re.sub(u'(?miu)^[ \t-=~\/_]+$', lambda mo: sep_break(mo.group()), txt)
return txt
def block_to_single_line(txt):
txt = re.sub(r'(?<=.)\n(?=.)', ' ', txt)
return txt
def preserve_spaces(txt):
'''
Replaces runs of multiple spaces with non-breaking space entities.
'''
txt = re.sub('(?P<space>[ ]{2,})', lambda mo: ' ' + (' ' * (len(mo.group('space')) - 1)), txt)
txt = txt.replace('\t', ' ')
return txt
def remove_indents(txt):
'''
Remove whitespace at the beginning of each line.
'''
txt = re.sub('(?miu)^\s+', '', txt)
return txt
def opf_writer(path, opf_name, manifest, spine, mi):
opf = OPFCreator(path, mi)
opf.create_manifest(manifest)
opf.create_spine(spine)
with open(os.path.join(path, opf_name), 'wb') as opffile:
opf.render(opffile)
def split_string_separator(txt, size):
'''
Splits the text by inserting \n\n after the last sentence break within each chunk of length size.
'''
if len(txt) > size:
txt = ''.join([re.sub(u'\.(?P<ends>[^.]*)$', '.\n\n\g<ends>',
txt[i:i+size], 1) for i in
xrange(0, len(txt), size)])
return txt
def detect_paragraph_type(txt):
'''
Tries to determine the paragraph type of the document.
block: Paragraphs are separated by a blank line.
single: Each line is a paragraph.
print: Each paragraph starts with a 2+ spaces or a tab
and ends when a new paragraph is reached.
unformatted: most lines have hard line breaks, few/no blank lines or indents
returns block, single, print, unformatted
'''
txt = txt.replace('\r\n', '\n')
txt = txt.replace('\r', '\n')
txt_line_count = len(re.findall('(?mu)^\s*.+$', txt))
# Check for hard line breaks - true if 55% of the doc breaks in the same region
docanalysis = DocAnalysis('txt', txt)
hardbreaks = docanalysis.line_histogram(.55)
if hardbreaks:
# Determine print percentage
tab_line_count = len(re.findall('(?mu)^(\t|\s{2,}).+$', txt))
print_percent = tab_line_count / float(txt_line_count)
# Determine block percentage
empty_line_count = len(re.findall('(?mu)^\s*$', txt))
block_percent = empty_line_count / float(txt_line_count)
# Compare the two types - the type with the larger number of instances wins
# in cases where only one or the other represents the vast majority of the document neither wins
if print_percent >= block_percent:
if .15 <= print_percent <= .75:
return 'print'
elif .15 <= block_percent <= .75:
return 'block'
# Assume unformatted text with hardbreaks if nothing else matches
return 'unformatted'
# return single if hardbreaks is false
return 'single'
def detect_formatting_type(txt):
'''
Tries to determine the formatting of the document.
markdown: Markdown formatting is used.
textile: Textile formatting is used.
heuristic: When none of the above formatting types are
detected heuristic is returned.
'''
# Keep a count of the number of format specific object
# that are found in the text.
markdown_count = 0
textile_count = 0
# Check for markdown
# Headings
markdown_count += len(re.findall('(?mu)^#+', txt))
markdown_count += len(re.findall('(?mu)^=+$', txt))
markdown_count += len(re.findall('(?mu)^-+$', txt))
# Images
markdown_count += len(re.findall('(?u)!\[.*?\](\[|\()', txt))
# Links
markdown_count += len(re.findall('(?u)^|[^!]\[.*?\](\[|\()', txt))
# Check for textile
# Headings
textile_count += len(re.findall(r'(?mu)^h[1-6]\.', txt))
# Block quote.
textile_count += len(re.findall(r'(?mu)^bq\.', txt))
# Images
textile_cou
| thedep2/CouchPotatoServer | couchpotato/core/media/_base/providers/nzb/binnewz/main.py | Python | gpl-3.0 | 18,081 | 0.008683 |
from binsearch import BinSearch
from nzbclub import NZBClub
from nzbindex import NZBIndex
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import getTitle, splitString, tryInt
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.environment import Env
from couchpotato.core.logger import CPLog
from couchpotato.core.helpers import namer_check
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
log = CPLog(__name__)
import re
import urllib
import urllib2
import traceback
class Base(NZBProvider):
urls = {
'download': 'http://www.binnews.in/',
'detail': 'http://www.binnews.in/',
'search': 'http://www.binnews.in/_bin/search2.php',
}
http_time_between_calls = 4 # Seconds
cat_backup_id = None
def _search(self, movie, quality, results):
nzbDownloaders = [NZBClub(), BinSearch(), NZBIndex()]
MovieTitles = movie['info']['titles']
moviequality = simplifyString(quality['identifier'])
movieyear = movie['info']['year']
if quality['custom']['3d']==1:
threeD= True
else:
threeD=False
if moviequality in ("720p","1080p","bd50"):
cat1='39'
cat2='49'
minSize = 2000
elif moviequality in ("dvdr"):
cat1='23'
cat2='48'
minSize = 3000
else:
cat1='6'
cat2='27'
minSize = 500
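# (cat1/cat2 are BinNewz search-form category ids chosen per quality;
# minSize looks like a minimum release size in MB used further down, an
# assumption, since its use falls outside this excerpt.)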
for MovieTitle in MovieTitles:
try:
TitleStringReal = str(MovieTitle.encode("latin-1").replace('-',' '))
except:
continue
if threeD:
TitleStringReal = TitleStringReal + ' 3d'
data = 'chkInit=1&edTitre='+TitleStringReal+'&chkTitre=on&chkFichier=on&chkCat=on&cats%5B%5D='+cat1+'&cats%5B%5D='+cat2+'&edAge=&edYear='
try:
soup = BeautifulSoup( urllib2.urlopen(self.urls['search'], data) )
except Exception, e:
log.error(u"Error trying to load BinNewz response: "+e)
return []
tables = soup.findAll("table", id="tabliste")
for table in tables:
rows = table.findAll("tr")
for row in rows:
cells = row.select("> td")
if (len(cells) < 11):
continue
name = cells[2].text.strip()
testname=namer_check.correctName(name,movie)
if testname==0:
continue
language = cells[3].find("img").get("src")
if not "_fr" in language and not "_frq" in language:
continue
detectedlang=''
if "_fr" in language:
detectedlang=' truefrench '
else:
detectedlang=' french '
# blacklist_groups = [ "alt.binaries.multimedia" ]
blacklist_groups = []
newgroupLink = cells[4].find("a")
newsgroup = None
if newgroupLink.contents:
newsgroup = newgroupLink.contents[0]
if newsgroup == "abmulti":
newsgroup = "alt
|
.binaries.multimedia"
elif newsgroup == "ab.moovee":
newsgroup = "alt.binaries.moovee"
elif newsgroup == "abtvseries":
newsgroup = "alt.binaries.tvseries"
elif newsgroup == "abtv":
newsgroup = "alt.binaries.tv"
elif newsgroup == "a.b.teevee":
newsgroup = "alt.binaries.teevee"
elif newsgroup == "abstvdivxf":
newsgroup = "alt.binaries.series.tv.divx.french"
elif newsgroup == "abhdtvx264fr":
newsgroup = "alt.binaries.hdtv.x264.french"
elif newsgroup == "abmom":
newsgroup = "alt.binaries.mom"
elif newsgroup == "abhdtv":
newsgroup = "alt.binaries.hdtv"
elif newsgroup == "abboneless":
newsgroup = "alt.binaries.boneless"
elif newsgroup == "abhdtvf":
newsgroup = "alt.binaries.hdtv.french"
elif newsgroup == "abhdtvx264":
newsgroup = "alt.binaries.hdtv.x264"
elif newsgroup == "absuperman":
newsgroup = "alt.binaries.superman"
elif newsgroup == "abechangeweb":
newsgroup = "alt.binaries.echange-web"
elif newsgroup == "abmdfvost":
newsgroup = "alt.binaries.movies.divx.french.vost"
elif newsgroup == "abdvdr":
newsgroup = "alt.binaries.dvdr"
elif newsgroup == "abmzeromov":
newsgroup = "alt.binaries.movies.zeromovies"
elif newsgroup == "abcfaf":
newsgroup = "alt.binaries.cartoons.french.animes-fansub"
elif newsgroup == "abcfrench":
newsgroup = "alt.binaries.cartoons.french"
elif newsgroup == "abgougouland":
newsgroup = "alt.binaries.gougouland"
elif newsgroup == "abroger":
newsgroup = "alt.binaries.roger"
elif newsgroup == "abtatu":
newsgroup = "alt.binaries.tatu"
elif newsgroup =="abstvf":
newsgroup = "alt.binaries.series.tv.french"
elif newsgroup =="abmdfreposts":
newsgroup="alt.binaries.movies.divx.french.reposts"
elif newsgroup =="abmdf":
newsgroup="alt.binaries.movies.french"
elif newsgroup =="abhdtvfrepost":
newsgroup="alt.binaries.hdtv.french.repost"
elif newsgroup == "abmmkv":
newsgroup = "alt.binaries.movies.mkv"
elif newsgroup == "abf-tv":
newsgroup = "alt.binaries.french-tv"
elif newsgroup == "abmdfo":
newsgroup = "alt.binaries.movies.divx.french.old"
elif newsgroup == "abmf":
newsgroup = "alt.binaries.movies.french"
elif newsgroup == "ab.movies":
newsgroup = "alt.binaries.movies"
elif newsgroup == "a.b.french":
newsgroup = "alt.binaries.french"
elif newsgroup == "a.b.3d":
newsgroup = "alt.binaries.3d"
elif newsgroup == "ab.dvdrip":
newsgroup = "alt.binaries.dvdrip"
elif newsgroup == "ab.welovelori":
newsgroup = "alt.binaries.welovelori"
elif newsgroup == "abblu-ray":
newsgroup = "alt.binaries.blu-ray"
elif newsgroup == "ab.bloaf":
newsgroup = "alt.binaries.bloaf"
elif newsgroup == "ab.hdtv.german":
newsgroup = "alt.binaries.hdtv.german"
elif newsgroup == "abmd":
newsgroup = "alt
| NifTK/NiftyNet | tests/downsample_test.py | Python | apache-2.0 | 3,371 | 0.000297 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
from niftynet.layer.downsample import DownSampleLayer
from tests.niftynet_testcase import NiftyNetTestCase
class DownSampleTest(NiftyNetTestCase):
def get_3d_input(self):
input_shape = (2, 16, 16, 16, 8)
x = tf.ones(input_shape)
return x
def get_2d_input(self):
input_shape = (2, 16, 16, 8)
x = tf.ones(input_shape)
return x
def _test_nd_downsample_output_shape(self,
rank,
param_dict,
output_shape):
if rank == 2:
input_data = self.get_2d_input()
elif rank == 3:
input_data = self.get_3d_input()
downsample_layer = DownSampleLayer(**param_dict)
output_data = downsample_layer(input_data)
print(downsample_layer)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
out = sess.run(output_data)
self.assertAllClose(output_shape, out.shape)
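# The expected shapes below are consistent with SAME padding, where each
# spatial dimension maps to ceil(dim / stride): e.g. ceil(16 / 3) = 6,
# ceil(16 / 2) = 8, and stride 1 leaves 16 unchanged.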
def test_3d_max_shape(self):
input_param = {'func': 'MAX',
'kernel_size': 3,
'stride': 3}
self._test_nd_downsample_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 6, 6, 6, 8))
def test_3d_avg_shape(self):
input_param = {'func': 'AVG',
'kernel_size': [3, 3, 2],
'stride': [3, 2, 1]}
self._test_nd_downsample_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 6, 8, 16, 8))
def test_3d_const_shape(self):
input_param = {'func': 'CONSTANT',
'kernel_size': [1, 3, 2],
'stride': [3, 2, 2]}
self._test_nd_downsample_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 6, 8, 8, 8))
def test_2d_max_shape(self):
input_param = {'func': 'CONSTANT',
'kernel_size': [1, 3],
'stride': 3}
self._test_nd_downsample_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 6, 6, 8))
def test_2d_avg_shape(self):
input_param = {'func': 'AVG',
'kernel_size': [2, 3],
'stride': 2}
self._test_nd_downsample_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 8, 8))
def test_2d_const_shape(self):
input_param = {'func': 'CONSTANT',
'kernel_size': [2, 3],
'stride': [2, 3]}
self._test_nd_downsample_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 6, 8))
if __name__ == "__main__":
tf.test.main()
| readline/btb | cancer/integrate/coMut/mafSignature2plot.py | Python | gpl-2.0 | 3,469 | 0.036322 |
#!/usr/bin/env python
# Kai Yu
# github.com/readline
# mafSignature2plot.py
# 150410
from __future__ import division
import os,sys,math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
matplotlib.use('Agg')
import os,sys,matplotlib,pylab
import matplotlib.pyplot as plt
import numpy as np
def font(size):
return matplotlib.font_manager.FontProperties(size=size,\
fname='/Share/BP/yukai/src/font/ARIAL.TTF')
def fontConsola(size):
return matplotlib.font_manager.FontProperties(size=size,\
fname='/Share/BP/yukai/src/font/consola.ttf')
def getMatrix(sigPath):
infile = open(sigPath,'r')
countn = infile.readline().rstrip().split('=')[1]
mutList, tmpList = [],[]
sumdic = {'TA':0,'TC':0,'TG':0,'CA':0,'CG':0,'CT':0}
while 1:
line = infile.readline().rstrip()
if not line: break
c = line.split('\t')
mutList.append(c[0])
tmpList.append(float(c[1]))
bmut = c[0].split('>')[0][1] + c[0].split('>')[1][1]
sumdic[bmut] += float(c[1])
sumList = []
for bmut in ['TA','TC','TG','CA','CG','CT']:
sumList.append(sumdic[bmut])
infile.close()
return mutList, tmpList, sumList, countn
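# Input format inferred from the parsing above:
#     line 1:  <label>=<mutation count>
#     rest:    <trinucleotide mutation>\t<rate>, e.g. "ACA>AGA\t0.5"
# The middle base on each side of '>' gives the base change
# ('ACA>AGA' -> 'CG'), which is accumulated into sumdic.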
def getC(c):
tmpdic = []
for n in range(6):
for m in range(16):
tmpdic.append(c[n])
return tmpdic
def main():
try:
sigPath = sys.argv[1]
except:
print sys.argv[0] + ' [input signature path]'
sys.exit()
mutList, mat, sumList,countn = getMatrix(sigPath)
### Start plot
c = ['#FFFF99','#CCCCFF','#FFCC99','#CCFFCC','#CCFFFF','#FF9999']
col = getC(c)
fig = plt.figure(figsize = (20,10))
topy = int((max(mat)//5+1)*5)
ax1 = fig.add_axes([0.08,0.15,0.83,0.7])
ax2 = fig.add_axes([0.1,0.46,0.19,0.38])
ax1.set_xlim([0,96])
ax1.set_ylim([0,topy])
ax1.bar(range(96),mat,width=1,linewidth=0.5,color=col,alpha=1)
ax1.vlines([16,32,48,64,80],0,topy,linestyle='--',linewidth=1,color='black',alpha=0.7)
ax1.hlines(range(0,topy+1,5),0,96,linestyle='--',linewidth=1,color='black',alpha=0.7)
ax3 = ax1.twiny()
ax3.set_xlim([0,96])
ax3.set_ylim([0,topy])
ax3.set_xticks(range(8,96,16))
ax3.set_xticklabels(['T > A','T > C','T > G','C > A','C > G','C > T'],fontproperties=font(30))
plt.setp(ax3.get_xticklines(), visible=False)
mutList1 = []
for n in mutList:
mutList1.append(n[:3])
mutList0 = []
for n in range(96):
mutList0.append(n+0.7)
ax1.set_xticks(mutList0)
plt.setp(ax1.get_xticklines(), visible=False)
ax1.set_xticklabels(mutList1, fontproperties=fontConsola(16),rotation='vertical')
ax1.set_yticks(range(0,topy+1,5))
ax1.set_yticklabels(range(0,topy+1,5),fontproperties=font(24))
ax1.set_ylabel('Normalized rate / Mb',fontproperties=font(30))
# Pie plot
ax2.set_xticks([])
ax2.set_yticks([])
pielabel = ['T>A','T>C','T>G','C>A','C>G','C>T']
explode = [0.32,0.29,0.26,0.23,0.2,0]
pie1 = ax2.pie(sumList,labels=pielabel, explode=explode,colors=c,shadow=True,startangle=270)
for i in pie1[1]:
# print i
i.set_fontproperties(font(24))
# i.set_backgroundcolor('white')
ax1.text(32,topy*0.8,'%s\nn = %s'%(sigPath.split('/')[-1][:-10],countn),fontproperties=font(32),backgroundcolor='white')
plt.savefig(sigPath+'.sigplot.png')
plt.savefig(sigPath+'.sigplot.pdf')
if __name__ == '__main__':
main()
| iirlas/OpenJam-2017 | sge/gfx.py | Python | gpl-3.0 | 106,231 | 0.000028 |
# Copyright (C) 2012-2016 Julie Marchant <onpon4@riseup.net>
#
# This file is part of the Pygame SGE.
#
# The Pygame SGE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The Pygame SGE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with the Pygame SGE. If not, see <http://www.gnu.org/licenses/>.
"""
This module provides classes related to rendering graphics.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import math
import os
import warnings
import pygame
import six
import sge
from sge import r
from sge.r import (_check_color_input, _check_color, _scale, _get_blend_flags,
_screen_blend, f_split_text, s_get_image, s_set_size,
s_refresh, s_set_transparency, s_from_text, tg_blit)
COLORS = {'white': '#ffffff', 'silver': '#c0c0c0', 'gray': '#808080',
'black': '#000000', 'red': '#ff0000', 'maroon': '#800000',
'yellow': '#ffff00', 'olive': '#808000', 'lime': '#00ff00',
'green': '#008000', 'aqua': '#00ffff', 'teal': '#008080',
'blue': '#0000ff', 'navy': '#000080', 'fuchsia': '#ff00ff',
'purple': '#800080'}
COLOR_NAMES = {}
for pair in COLORS.items():
COLOR_NAMES[pair[1]] = pair[0]
__all__ = ["Color", "Sprite", "TileGrid", "Font", "BackgroundLayer",
"Background"]
class Color(object):
"""
This class stores color information.
Objects of this class can be converted to iterables indicating the
object's :attr:`red`, :attr:`green`, :attr:`blue`, and :attr:`alpha`
values, respectively; to integers which can be interpreted as a
hexadecimal representation of the color, excluding alpha
transparency; and to strings which indicate the English name of the
color (in all lowercase) if possible, and :attr:`hex_string`
otherwise.
.. attribute:: red
The red component of the color as an integer, where ``0``
indicates no red intensity and ``255`` indicates full red
intensity.
.. attribute:: green
The green component of the color as an integer, where ``0``
indicates no green intensity and ``255`` indicates full green
intensity.
.. attribute:: blue
The blue component of the color as an integer, where ``0``
indicates no blue intensity and ``255`` indicates full blue
intensity.
.. attribute:: alpha
The alpha transparency of the color as an integer, where ``0``
indicates full transparency and ``255`` indicates full opacity.
.. attribute:: hex_string
An HTML hex string representation of the color. (Read-only)
"""
def __init__(self, value):
"""
Arguments:
- ``value`` -- The value indicating the color represented by
this object. Should be one of the following:
- One of the 16 HTML color names (case-insensitive).
- An HTML-style hex string containing 3, 4, 6, or 8 digits
which indicate the red, green, blue, and alpha components of
the color, respectively, as pairs of hexadecimal digits. If
the string contains 3 or 4 digits, each digit is duplicated;
for example, ``"#F80"`` is equivalent to ``"#FF8800"``.
- An integer which, when written as a hexadecimal number,
specifies the components of the color in the same way as an
HTML-style hex string containing 6 digits.
- A list or tuple indicating the red, green, and blue
components, and optionally the alpha component, in that
order.
"""
self.alpha = 255
if isinstance(value, six.string_types):
value = COLORS.get(value, value)[1:]
if len(value) == 3:
r, g, b = [int(value[i] * 2, 16) for i in six.moves.range(3)]
self.red, self.green, self.blue = r, g, b
elif len(value) == 4:
r, g, b, a = [int(value[i] * 2, 16) for i in range(4)]
self.red, self.green, self.blue, self.alpha = r, g, b, a
elif len(value) == 6:
r, g, b = [int(value[i:(i + 2)], 16)
for i in six.moves.range(0, 6, 2)]
self.red, self.green, self.blue = r, g, b
elif len(value) == 8:
r, g, b, a = [int(value[i:(i + 2)], 16) for i in range(0, 8, 2)]
self.red, self.green, self.blue, self.alpha = r, g, b, a
else:
raise ValueError("Invalid color string.")
elif isinstance(value, six.integer_types):
b, g, r = [(value & 256 ** (i + 1) - 1) // 256 ** i
for i in six.moves.range(3)]
self.red, self.green, self.blue = r, g, b
elif isinstance(value, (list, tuple)):
if len(value) >= 3:
self.red, self.green, self.blue = value[:3]
if len(value) >= 4:
self.alpha = value[3]
else:
raise ValueError("Invalid color tuple.")
else:
raise ValueError("Invalid color value.")
@property
def red(self):
return self._r
@red.setter
def red(self, value):
self._r = _check_color_input(value)
@property
def green(self):
return self._g
@green.setter
def green(self, value):
self._g = _check_color_input(value)
@property
def blue(self):
return self._b
@blue.setter
def blue(self, value):
self._b = _check_color_input(value)
@property
def alpha(self):
return self._a
@alpha.setter
def alpha(self, value):
self._a = _check_color_input(value)
@property
def hex_string(self):
if self.alpha == 255:
r, g, b = [hex(c)[2:].zfill(2) for c in self[:3]]
return "#{}{}{}".format(r, g, b)
else:
r, g, b, a = [hex(c)[2:].zfill(2) for c in self]
return "#{}{}{}{}".format(r, g, b, a)
def __iter__(self):
return iter([self.red, self.green, self.blue, self.alpha])
def __int__(self):
return self.red * 256 ** 2 | self.green * 256 | self.blue
def __repr__(self):
return 'sge.gfx.Color("{}")'.format(str(self))
def __str__(self):
return COLOR_NAMES.get(self.hex_string, self.hex_string)
def __eq__(self, other):
return str(self) == str(other)
def __getitem__(self, index):
return tuple(self)[index]
def __setitem__(self, index, value):
c = list(self)
c[index] = value
self.red, self.green, self.blue, self.alpha = c
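# Illustrative constructions (following the docstring above):
#     Color("#F80"), Color(0xFF8800) and Color((255, 136, 0)) build the
#     same color ("#F80" expands to "#FF8800"); Color("red") resolves an
#     HTML color name; a fourth tuple element or a 4/8-digit hex string
#     sets alpha.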
class Sprite(object):
"""
This class stores images and information about how the SGE is to use
those images.
What image formats are supported depends on the implementation of
the SGE, but image formats that are generally a good choice are PNG
and JPEG. See the implementation-specific information for a full
list of supported formats.
.. attribute:: width
The width of the sprite.
.. note::
Changing this attribute will cause the sprite to be scaled
horizontally. This is a destructive transformation: it can
result in loss of pixel information, especially if it is done
repeatedly. Because of this, it is advised that you do not
adjust this value for routine scaling. Use the
:attr:`image_xscale` attribute of a :class:`sge.dsp.Object`
object instead.
.. attribute:: height
The height of the sprite.
.. note::
          Changing this attribute will cause the sprite to be scaled
          vertically.
|
emilleishida/snclass
|
snclass/matrix.py
|
Python
|
gpl-3.0
| 10,669
| 0.001312
|
"""
Created by Emille Ishida in May, 2015.
Class to implement calculations on data matrix.
"""
import os
import sys
import matplotlib.pylab as plt
import numpy as np
from multiprocessing import Pool
from snclass.treat_lc import LC
from snclass.util import read_user_input, read_snana_lc, translate_snid
from snclass.functions import core_cross_val, screen
##############################################
class DataMatrix(object):
"""
Data matrix class.
Methods:
- build: Build data matrix according to user input file specifications.
- reduce_dimension: Perform dimensionality reduction.
- cross_val: Perform cross-validation.
Attributes:
- user_choices: dict, user input choices
- snid: vector, list of objects identifiers
- datam: array, data matrix for training
- redshift: vector, redshift for training data
- sntype: vector, classification of training data
- low_dim_matrix: array, data matrix in KernelPC space
- transf_test: function, project argument into KernelPC space
- final: vector, optimize parameter values
"""
def __init__(self, input_file=None):
"""
Read user input file.
input: input_file -> str
name of user input file
"""
self.datam = None
self.snid = []
self.redshift = None
self.sntype = None
self.low_dim_matrix = None
self.transf_test = None
self.final = None
self.test_projection = []
if input_file is not None:
self.user_choices = read_user_input(input_file)
def check_file(self, filename, epoch=True, ref_filter=None):
"""
Construct one line of the data matrix.
input: filename, str
file of raw data for 1 supernova
epoch, bool - optional
If true, check if SN satisfies epoch cuts
Default is True
ref_filter, str - optional
Reference filter for peak MJD calculation
Default is None
"""
screen('Fitting ' + filename, self.user_choices)
# translate identifier
self.user_choices['path_to_lc'] = [translate_snid(filename, self.user_choices['photon_flag'][0])[0]]
# read light curve raw data
raw = read_snana_lc(self.user_choices)
# initiate light curve object
lc_obj = LC(raw, self.user_choices)
# load GP fit
lc_obj.load_fit_GP(self.user_choices['samples_dir'][0] + filename)
# normalize
lc_obj.normalize(ref_filter=ref_filter)
# shift to peak mjd
lc_obj.mjd_shift()
if epoch:
# check epoch requirements
lc_obj.check_epoch()
else:
lc_obj.epoch_cuts = True
if lc_obj.epoch_cuts:
# build data matrix lines
lc_obj.build_steps()
# store
obj_line = []
for fil in self.user_choices['filters']:
for item in lc_obj.flux_for_matrix[fil]:
obj_line.append(item)
rflag = self.user_choices['redshift_flag'][0]
redshift = raw[rflag][0]
obj_class = raw[self.user_choices['type_flag'][0]][0]
self.snid.append(raw['SNID:'][0])
return obj_line, redshift, obj_class
else:
screen('... Failed to pass epoch cuts!', self.user_choices)
screen('\n', self.user_choices)
return None
def store_training(self, file_out):
"""
Store complete training matrix.
input: file_out, str
output file name
"""
# write to file
if file_out is not None:
op1 = open(file_out, 'w')
op1.write('SNID type z LC...\n')
for i in xrange(len(self.datam)):
op1.write(str(self.snid[i]) + ' ' + str(self.sntype[i]) +
' ' + str(self.redshift[i]) + ' ')
for j in xrange(len(self.datam[i])):
op1.write(str(self.datam[i][j]) + ' ')
op1.write('\n')
op1.close()
def build(self, file_out=None, check_epoch=True, ref_filter=None):
"""
Build data matrix according to user input file specifications.
input: file_out -> str, optional
file to store data matrix (str). Default is None
check_epoch -> bool, optional
If True check if SN satisfies epoch cuts
Default is True
ref_filter -> str, optional
Reference filter for MJD calculation
Default is None
"
|
""
# list all files in sample directory
file_list = os.listdir(self.user_choices['samples_dir'][0])
datam = []
redshift = []
sntype = []
for obj in file_list:
if 'mean' in obj:
sn_char = self.check_file(obj, epoch=check_epoch,
ref_filter=ref_filter)
if sn_char is not None:
datam.append(sn_char[0])
                    redshift.append(sn_char[1])
sntype.append(sn_char[2])
self.datam = np.array(datam)
self.redshift = np.array(redshift)
self.sntype = np.array(sntype)
# store results
self.store_training(file_out)
def reduce_dimension(self):
"""Perform dimensionality reduction with user defined function."""
# define dimensionality reduction function
func = self.user_choices['dim_reduction_func']
# reduce dimensionality
self.low_dim_matrix = func(self.datam, self.user_choices)
# define transformation function
self.transf_test = func(self.datam, self.user_choices, transform=True)
def cross_val(self):
"""Optimize the hyperparameters for RBF kernel and ncomp."""
# correct type parameters if necessary
types_func = self.user_choices['transform_types_func']
if types_func is not None:
self.sntype = types_func(self.sntype, self.user_choices['Ia_flag'][0])
# initialize parameters
data = self.datam
types = self.sntype
choices = self.user_choices
nparticles = self.user_choices['n_cross_val_particles']
parameters = []
for i in xrange(nparticles):
pars = {}
pars['data'] = data
pars['types'] = types
pars['user_choices'] = choices
parameters.append(pars)
if int(self.user_choices['n_proc'][0]) > 0:
cv_func = self.user_choices['cross_validation_func']
pool = Pool(processes=int(self.user_choices['n_proc'][0]))
my_pool = pool.map_async(cv_func, parameters)
try:
results = my_pool.get(0xFFFF)
except KeyboardInterrupt:
                print 'Interrupted by the user!'
sys.exit()
pool.close()
pool.join()
results = np.array(results)
else:
number = self.user_choices['n_cross_val_particles']
results = np.array([core_cross_val(pars)
for pars in parameters])
flist = list(results[:,len(results[0])-1])
max_success = max(flist)
indx_max = flist.index(max_success)
self.final = {}
for i in xrange(len(self.user_choices['cross_val_par'])):
par_list = self.user_choices['cross_val_par']
self.final[par_list[i]] = results[indx_max][i]
def final_configuration(self):
"""Determine final configuraton based on cross-validation results."""
#update optimized hyper-parameters
for par in self.user_choices['cross_val_par']:
indx = self.user_choices['cross_val_par'].index(par)
self.user_choices[par] = self.final[par]
#update low dimensional matrix
self.reduce_dimension()
def plot(self, pcs,
|
deyvedvm/cederj
|
urionlinejudge/python/1379.py
|
Python
|
gpl-3.0
| 781
| 0.006452
|
"""
The mean of three integers A, B and C is (A + B + C)/3. The median of three integers is the one that would be in the
middle if they are sorted in non-decreasing order. Given two integers A and B, return the minimum possible integer C
such that the mean and the median of A, B and C are equal.
Input
Each test case is given in a single line that contains two integers A and B (1 ≤ A ≤ B ≤ 10^9). The last test case is
followed by a line containing two zeros.
Output
For each test case output one line containing the minimum possible integer C such that the mean and the median of A, B
and C are equal.
"""
while True:
    A, B = map(int, input().split())
    if A == 0 and B == 0:
        break
    print(2 * A - B)
|
googleads/google-ads-python
|
google/ads/googleads/v10/enums/types/asset_set_asset_status.py
|
Python
|
apache-2.0
| 1,168
| 0.000856
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"AssetSetAssetStatusEnum",},
)
class AssetSetAssetStatusEnum(proto.Message):
r"""Container for enum describing possible statuses of an asset
set asset.
"""
class AssetSetAssetStatus(proto.Enum):
r"""The possible statuses of an asset set asset."""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
REMOVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
apple/coremltools
|
coremltools/converters/mil/mil/passes/test_layernorm_instancenorm_fusion.py
|
Python
|
bsd-3-clause
| 15,619
| 0.002753
|
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
import pytest
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
np.random.seed(6174)
class TestLayerNormOrInstanceNormFusionPass:
@pytest.mark.parametrize("axes_size", [1, 2, 3])
def test_layer_norm(self, axes_size):
"""
Detect layer norm pattern, found in the TF bert model.
y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
        where mean and variance are computed along axes [-1] or [-1, -2] and so on
and gamma and beta are constants with rank equal to the length of the axes parameter.
"""
|
shape = (3, 5, 6)
rank = len(shape)
axes = list(range(rank - axes_size, rank))
@mb.program(input_specs=[mb.TensorSpec(shape=shape)])
def prog(x):
x1 = mb.reduce_mean(x=x, axes=axes, keep_dims=True)
x2 = mb.sub(x=x, y=x1)
x2 = mb.square(x=x2)
x2 = mb.reduce_mean(x=x2, axes=axes, keep_dims=True)
x2 = mb.add(x=x2, y=1e-5)
x2 = mb.rsqrt(x=x2)
x3 = mb.mul(x=np.random.rand(*shape[-len(axes) :]), y=x2)
x4 = mb.mul(x=x3, y=x1)
x5 = mb.mul(x=x, y=x3)
x4 = mb.sub(x=np.random.rand(*shape[-len(axes) :]), y=x4)
y = mb.add(x=x4, y=x5)
return y
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::fuse_layernorm_or_instancenorm"
)
assert get_op_types_in_program(prev_prog) == [
"reduce_mean",
"sub",
"square",
"reduce_mean",
"add",
"rsqrt",
"mul",
"mul",
"mul",
"sub",
"add",
]
assert get_op_types_in_program(prog) == ["layer_norm"]
assert_model_is_valid(
prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape}
)
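    def _layernorm_identity_check(self):
        # Illustrative helper (added; not part of the original suite): checks in
        # plain numpy that the decomposed pattern exercised above,
        # y = x * scale + (beta - mean * scale) with scale = gamma * rsqrt(var + eps),
        # equals standard layer norm gamma * (x - mean) / sqrt(var + eps) + beta.
        x = np.random.rand(3, 5, 6)
        gamma, beta = np.random.rand(6), np.random.rand(6)
        eps = 1e-5
        mean = x.mean(axis=-1, keepdims=True)
        var = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
        scale = gamma / np.sqrt(var + eps)
        assert np.allclose(x * scale + (beta - mean * scale),
                           gamma * (x - mean) / np.sqrt(var + eps) + beta)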
def test_instance_norm_pattern_1(self):
"""
Detect instance norm pattern
y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
where input is rank 4, (N,C,H,W), axis=[2, 3], along which reduction happens,
and gamma and beta are of shape (1,C,1,1)
"""
shape = (3, 5, 6, 7)
@mb.program(input_specs=[mb.TensorSpec(shape=shape)])
def prog(x):
x1 = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True)
x2 = mb.sub(x=x, y=x1)
x2 = mb.square(x=x2)
x2 = mb.reduce_mean(x=x2, axes=[2, 3], keep_dims=True)
x2 = mb.add(x=x2, y=1e-5)
x2 = mb.rsqrt(x=x2)
x3 = mb.mul(x=np.random.rand(1, shape[1], 1, 1), y=x2)
x4 = mb.mul(x=x3, y=x1)
x5 = mb.mul(x=x, y=x3)
x4 = mb.sub(x=np.random.rand(1, shape[1], 1, 1), y=x4)
y = mb.add(x=x4, y=x5)
return y
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::fuse_layernorm_or_instancenorm"
)
assert get_op_types_in_program(prev_prog) == [
"reduce_mean",
"sub",
"square",
"reduce_mean",
"add",
"rsqrt",
"mul",
"mul",
"mul",
"sub",
"add",
]
assert get_op_types_in_program(prog) == ["instance_norm"]
assert_model_is_valid(
prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape}
)
def test_instance_norm_pattern_1_rank_1_gamma_beta(self):
"""
Detect instance norm pattern
y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
        where the input is rank 4 channel-last (N,H,W,C), axes=[1, 2], along which reduction happens,
        and gamma and beta are of shape (C,)
"""
shape = (3, 5, 6, 7)
@mb.program(input_specs=[mb.TensorSpec(shape=shape)])
def prog(x):
x1 = mb.reduce_mean(x=x, axes=[1, 2], keep_dims=True)
x2 = mb.sub(x=x, y=x1)
x2 = mb.square(x=x2)
x2 = mb.reduce_mean(x=x2, axes=[1, 2], keep_dims=True)
x2 = mb.add(x=x2, y=1e-5)
x2 = mb.rsqrt(x=x2)
x3 = mb.mul(x=np.random.rand(shape[3]), y=x2)
x4 = mb.mul(x=x3, y=x1)
x5 = mb.mul(x=x, y=x3)
x4 = mb.sub(x=np.random.rand(shape[3]), y=x4)
y = mb.add(x=x4, y=x5)
return y
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::fuse_layernorm_or_instancenorm"
)
assert get_op_types_in_program(prev_prog) == [
"reduce_mean",
"sub",
"square",
"reduce_mean",
"add",
"rsqrt",
"mul",
"mul",
"mul",
"sub",
"add",
]
assert get_op_types_in_program(prog) == ["transpose", "instance_norm", "transpose"]
assert_model_is_valid(
prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape}
)
def test_instance_norm_pattern_1_with_channel_last_data_format(self):
"""
Detect instance norm pattern with channel last data format
x = transpose(x) # channel first to channel last, NCHW -> NHWC
x = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
x = transpose(x) # channel last to channel first, NHWC -> NCHW
The input is rank 4 (N, C, H, W) and the input for fused "instance_norm" op is
rank 4 (N, H, W, C), and axis=[1, 2] or [-3, -2], along which reduction happens.
This is common in TensorFlow model when data format is channel last.
PyMIL inserts transposes around "conv" layer to make "conv" channel first.
"fuse_layernorm_or_instancenorm" pass is expected to fuse this pattern as well.
"""
shape = (1, 3, 5, 5)
@mb.program(input_specs=[mb.TensorSpec(shape=shape)])
def prog(x):
x = mb.transpose(x=x, perm=[0, 2, 3, 1])
x1 = mb.reduce_mean(x=x, axes=[1, 2], keep_dims=True)
x2 = mb.sub(x=x, y=x1)
x2 = mb.square(x=x2)
x2 = mb.reduce_mean(x=x2, axes=[1, 2], keep_dims=True)
x2 = mb.add(x=x2, y=1e-5)
x2 = mb.rsqrt(x=x2)
x3 = mb.mul(x=np.random.rand(1, 1, 1, shape[1]), y=x2)
x4 = mb.mul(x=x3, y=x1)
x5 = mb.mul(x=x, y=x3)
x4 = mb.sub(x=np.random.rand(1, 1, 1, shape[1]), y=x4)
x6 = mb.add(x=x4, y=x5)
y = mb.transpose(x=x6, perm=[0, 3, 1, 2])
return y
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::fuse_layernorm_or_instancenorm"
)
assert get_op_types_in_program(prev_prog) == [
"transpose",
"reduce_mean",
"sub",
"square",
"reduce_mean",
"add",
"rsqrt",
"mul",
"mul",
"mul",
"sub",
"add",
"transpose",
]
assert get_op_types_in_program(prog) == [
"transpose",
"transpose",
"instance_norm",
"transpose",
"transpose",
]
assert_model_is_valid(
prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape},
)
# reduce transpose pass should remove extra ones
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
assert get_op_types_in_program(prog) == ["
|
Got-iT-Services-Inc/pyDebugger
|
Debug.py
|
Python
|
gpl-3.0
| 2,272
| 0.007482
|
#! /usr/bin/env python3
#############################################################
# Title: Python Debug Class #
# Description: Wrapper around Debugging and Logging for any #
# python class #
# Version: #
# * Version 1.00 03/28/2016 RC #
# * Version 1.10 12/16/2016 RC #
# * Version 1.11 07/06/2018 RC #
# #
# Author: Richard Cintorino (c) Richard Cintorino 2016 #
#############################################################
from datetime import datetime
class pyDebugger:
#Debug Function
def Log(self, sString, endd="\n",PrintName=True, DebugLevel="ALL", PrintTime=True):
sCN = ""
if DebugLevel in self.__DebugLevel or "ALL" in self.__DebugLevel:
if PrintTime == True:
sCN += datetime.now().strftime("%Y.%m.%d %H:%M:%S") + " "
if PrintName == True:
                if self.__PrintDebugLevel == True:
sCN += "[" + DebugLevel + "] "
sCN += self.ClassName + ":"
if self.__Debug == True:
print(sCN + sString,end=endd)
if self.__LogToFile == True:
try:
with open("/var/log/" + self.ClassName + ".log", "a+") as logFile:
logFile.write(str(datetime.now()) + " " + sCN + sString+endd)
logFile.close()
                except Exception as e:
print(self.ClassName + "_Logging Error: " + str(e))
def SetDebugLevel(self,DebugLevel):
if "," in DebugLevel:
self.__DebugLevel = DebugLevel.split(",")
else:
self.__DebugLevel = DebugLevel
def __init__(self,otherSelf, Debug, LogToFile, DebugLevel="ALL,NONE", PrintDebugLevel=True):
self.SetDebugLevel(DebugLevel)
self.__PrintDebugLevel = PrintDebugLevel
self.__Debug = Debug
self.__LogToFile = LogToFile
self.ClassName = otherSelf.__class__.__name__
self.Log("Starting Debugger, Debug="+ str(Debug) + " LogToFile=" + str(LogToFile))
|
smartfile/django-mysqlpool
|
django_mysqlpool/backends/mysqlpool/base.py
|
Python
|
mit
| 3,693
| 0
|
# -*- coding: utf-8 -*-
"""The top-level package for ``django-mysqlpool``."""
# These imports make 2 act like 3, making it easier on us to switch to PyPy or
# some other VM if we need to for performance reasons.
from __future__ import (absolute_import, print_function, unicode_literals,
division)
# Make ``Foo()`` work the same in Python 2 as it does in Python 3.
__metaclass__ = type
import os
from django.conf import settings
from django.db.backends.mysql import base
from django.core.exceptions import ImproperlyConfigured
try:
import sqlalchemy.pool as pool
except ImportError as e:
raise ImproperlyConfigured("Error loading SQLAlchemy module: %s" % e)
# Global variable to hold the actual connection pool.
MYSQLPOOL = None
# Default pool type (QueuePool, SingletonThreadPool, AssertionPool, NullPool,
# StaticPool).
DEFAULT_BACKEND = 'QueuePool'
# Needs to be less than MySQL connection timeout (server setting). The default
# is 120, so default to 119.
DEFAULT_POOL_TIMEOUT = 119
def isiterable(value):
"""Determine whether ``value`` is iterable."""
try:
iter(value)
return True
except TypeError:
return False
class OldDatabaseProxy():
"""Saves a reference to the old connect function.
Proxies calls to its own connect() method to the old function.
"""
def __init__(self, old_connect):
"""Store ``old_connect`` to be used whenever we connect."""
self.old_connect = old_connect
def connect(self, **kwargs):
"""Delegate to the old ``connect``."""
# Bounce the call to the old function.
return self.old_connect(**kwargs)
class HashableDict(dict):
"""A dictionary that is hashable.
This is not generally useful, but created specifically to hold the ``conv``
    parameter that needs to be passed to MySQLdb.
"""
def __hash__(self):
"""Calculate the hash of this ``dict``.
The hash is determined by converting to a sorted tuple of key-value
pairs and hashing that.
"""
        items = [(n, tuple(v)) for n, v in self.items() if isiterable(v)]
        return hash(tuple(sorted(items)))
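def _hashable_dict_example():
    # Illustrative check (added): dicts with equal iterable values hash the
    # same, which lets SQLAlchemy group equivalent connection kwargs into a
    # single managed pool.
    a = HashableDict(conv=['x'], ssl=['ca', 'cert'])
    b = HashableDict(ssl=['ca', 'cert'], conv=['x'])
    assert hash(a) == hash(b) and a == b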
# Define this here so Django can import it.
DatabaseWrapper = base.DatabaseWrapper
# Wrap the old connect() function so our pool can call it.
OldDatabase = OldDatabaseProxy(base.Database.connect)
def get_pool():
"""Create one and only one pool using the configured settings."""
global MYSQLPOOL
if MYSQLPOOL is None:
backend_name = getattr(settings, 'MYSQLPOOL_BACKEND', DEFAULT_BACKEND)
backend = getattr(pool, backend_name)
kwargs = getattr(settings, 'MYSQLPOOL_ARGUMENTS', {})
kwargs.setdefault('poolclass', backend)
kwargs.setdefault('recycle', DEFAULT_POOL_TIMEOUT)
MYSQLPOOL = pool.manage(OldDatabase, **kwargs)
setattr(MYSQLPOOL, '_pid', os.getpid())
if getattr(MYSQLPOOL, '_pid', None) != os.getpid():
pool.clear_managers()
return MYSQLPOOL
def connect(**kwargs):
"""Obtain a database connection from the connection pool."""
# SQLAlchemy serializes the parameters to keep unique connection
# parameter groups in their own pool. We need to store certain
# values in a manner that is compatible with their serialization.
conv = kwargs.pop('conv', None)
ssl = kwargs.pop('ssl', None)
if conv:
kwargs['conv'] = HashableDict(conv)
if ssl:
kwargs['ssl'] = HashableDict(ssl)
# Open the connection via the pool.
return get_pool().connect(**kwargs)
# Monkey-patch the regular mysql backend to use our hacked-up connect()
# function.
base.Database.connect = connect
|
qadium-memex/linkalytics
|
linkalytics/factor_validator/coincidence/coincidence.py
|
Python
|
apache-2.0
| 1,870
| 0.01123
|
from datetime import datetime
from ... search import get_results, phone_hits, both_hits
from ... run_cli import Arguments
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(16)
def unique_features(feature, data):
features = set()
for v in data.values():
try:
            if isinstance(v[feature], str):
                features.add(v[feature])
            elif isinstance(v[feature], list):
                for i in v[feature]:
                    features.add(i)
        except KeyError:
pass
return features
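# Illustrative check (added; sample records are hypothetical): scalar and
# list-valued fields are flattened into one set.
def _unique_features_example():
    data = {'a': {'phone': '555-0100'},
            'b': {'phone': ['555-0100', '555-0199']}}
    assert unique_features('phone', data) == {'555-0100', '555-0199'}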
def parsetime(timestring):
return datetime.strptime(timestring, '%Y-%m-%dT%H:%M:%S').ctime()
def specific_term(args):
accumulator = lambda x: get_term(x, args)
query = args.query[0]
results = get_results(query, int(args.size[0]), True)
phone = unique_features("phone", results)
posttime = unique_features("posttime", results)
output = {
'phrase' : query,
'total' : len(phone),
'initial_date': parsetime(min(posttime)),
'final_date' : parsetime(max(posttime)),
}
output['results'] = dict(pool.map(accumulator, phone))
return output
def get_term(pid, args):
phone_res = phone_hits(pid, int(args.size[0]))
both_res = both_hits(args.query[0], pid)
date_phone = set()
for v in phone_res.values():
try:
date_phone.add(v["posttime"])
        except KeyError:
pass
term = {
'results':{
'phone' : phone_res['total'],
'both' : both_res['total'],
},
'date': {
'initial': parsetime(min(date_phone)),
'final' : parsetime(max(date_phone)),
}
}
return pid, term
def run(node):
args = Arguments(node.get('text', 'bouncy'), node.get('size', 100))
return specific_term(args)
|
mitsei/dlkit
|
dlkit/abstract_osid/authorization/managers.py
|
Python
|
mit
| 119,516
| 0.002083
|
"""Implementations of authorization abstract base class managers."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class AuthorizationProfile:
"""The ``AuthorizationProfile`` describes the interoperability among authorization services."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def supports_visible_federation(self):
"""Tests if federation is visible.
:return: ``true`` if visible federation is supported ``,`` ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization(self):
"""Tests for the availability of an authorization service which is the basic service for checking authorizations.
:return: ``true`` if authorization is supported, ``false`` otherwise
:rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_lookup(self):
"""Tests if an authorization lookup service is supported.
An authorization lookup service defines methods to access
        authorizations.
        :return: ``true`` if authorization lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_query(self):
"""Tests if an authorization query service is supported.
:return: ``true`` if authorization query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_search(self):
"""Tests if an authorization search service is supported.
:return: ``true`` if authorization search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_admin(self):
"""Tests if an authorization administrative service is supported.
:return: ``true`` if authorization admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_notification(self):
"""Tests if authorization notification is supported.
Messages may be sent when authorizations are created, modified,
or deleted.
:return: ``true`` if authorization notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_vault(self):
"""Tests if an authorization to vault lookup session is available.
:return: ``true`` if authorization vault lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_vault_assignment(self):
"""Tests if an authorization to vault assignment session is available.
:return: ``true`` if authorization vault assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_authorization_smart_vault(self):
"""Tests if an authorization smart vaulting session is available.
:return: ``true`` if authorization smart vaulting is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_lookup(self):
"""Tests if a function lookup service is supported.
A function lookup service defines methods to access
authorization functions.
:return: ``true`` if function lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_query(self):
"""Tests if a function query service is supported.
:return: ``true`` if function query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_search(self):
"""Tests if a function search service is supported.
:return: ``true`` if function search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_admin(self):
"""Tests if a function administrative service is supported.
:return: ``true`` if function admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_notification(self):
"""Tests if function notification is supported.
Messages may be sent when functions are created, modified, or
deleted.
:return: ``true`` if function notification is supported ``,`` ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_vault(self):
"""Tests if a function to vault lookup session is available.
:return: ``true`` if function vault lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_vault_assignment(self):
"""Tests if a function to vault assignment session is available.
:return: ``true`` if function vault assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_function_smart_vault(self):
"""Tests if a function smart vaulting session is available.
:return: ``true`` if function smart vaulting is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_qualifier_lookup(self):
"""Tests if a qualifier lookup service is supported.
A function lookup service defines methods to access
authorization qualifiers.
:return: ``true`` if qualifier lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
        return # boolean
|
ESOedX/edx-platform
|
openedx/core/djangoapps/crawlers/tests/test_models.py
|
Python
|
agpl-3.0
| 1,412
| 0.002126
|
# -*- coding: utf-8 -*-
"""
Tests that the request came from a crawler or not.
"""
from __future__ import absolute_import
import ddt
from django.test import TestCase
from django.http import HttpRequest
from ..models import CrawlersConfig
@ddt.ddt
class CrawlersConfigTest(TestCase):
def setUp(self):
super(CrawlersConfigTest, self).setUp()
        CrawlersConfig(known_user_agents='edX-downloader,crawler_foo', enabled=True).save()
    @ddt.data(
"Mozilla/5.0 (Linux; Android 5.1; Nexus 5 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/47.0.2526.100 Mobile Safari/537.36 edX/org.edx.mobile/2.0.0",
|
"Le Héros des Deux Mondes",
)
def test_req_user_agent_is_not_crawler(self, req_user_agent):
"""
verify that the request did not come from a crawler.
"""
fake_request = HttpRequest()
fake_request.META['HTTP_USER_AGENT'] = req_user_agent
self.assertFalse(CrawlersConfig.is_crawler(fake_request))
@ddt.data(
u"edX-downloader",
"crawler_foo".encode("utf-8")
)
def test_req_user_agent_is_crawler(self, req_user_agent):
"""
verify that the request came from a crawler.
"""
fake_request = HttpRequest()
fake_request.META['HTTP_USER_AGENT'] = req_user_agent
self.assertTrue(CrawlersConfig.is_crawler(fake_request))
|
citrix-openstack-build/sahara
|
sahara/conductor/manager.py
|
Python
|
apache-2.0
| 14,758
| 0
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles database requests from other Sahara services."""
import copy
from sahara.db import base as db_base
from sahara.utils import configs
from sahara.utils import crypto
CLUSTER_DEFAULTS = {
"cluster_configs": {},
"status": "undefined",
"anti_affinity": [],
"status_description": "",
"info": {},
"rollback_info": {},
"sahara_info": {},
}
NODE_GROUP_DEFAULTS = {
"node_processes": [],
"node_configs": {},
"volumes_per_node": 0,
"volumes_size": 0,
"volumes_availability_zone": None,
"volume_mount_prefix": "/volumes/disk",
"volume_type": None,
"floating_ip_pool": None,
"security_groups": None,
"auto_security_group": False,
"availability_zone": None,
}
INSTANCE_DEFAULTS = {
"volumes": []
}
DATA_SOURCE_DEFAULTS = {
"credentials": {}
}
def _apply_defaults(values, defaults):
new_values = copy.deepcopy(defaults)
new_values.update(values)
return new_values
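def _apply_defaults_example():
    # Illustrative check (added): request values override the defaults, and
    # the shared defaults dict is deep-copied rather than mutated.
    merged = _apply_defaults({"volumes": ["v1"]}, INSTANCE_DEFAULTS)
    assert merged == {"volumes": ["v1"]}
    assert INSTANCE_DEFAULTS == {"volumes": []}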
class ConductorManager(db_base.Base):
"""This class aimed to conduct things.
The methods in the base API for sahara-conductor are various proxy
operations that allows other services to get specific work done without
locally accessing the database.
Additionally it performs some template-to-object copying magic.
"""
def __init__(self):
super(ConductorManager, self).__init__()
# Common helpers
def _populate_node_groups(self, context, cluster):
node_groups = cluster.get('node_groups')
if not node_groups:
return []
populated_node_groups = []
for node_group in node_groups:
populated_node_group = self._populate_node_group(context,
node_group)
self._cleanup_node_group(populated_node_group)
populated_node_group["tenant_id"] = context.tenant_id
populated_node_groups.append(
populated_node_group)
return populated_node_groups
def _cleanup_node_group(self, node_group):
node_group.pop('id', None)
node_group.pop('created_at', None)
node_group.pop('updated_at', None)
def _populate_node_group(self, context, node_group):
node_group_merged = copy.deepcopy(NODE_GROUP_DEFAULTS)
ng_tmpl_id = node_group.get('node_group_template_id')
ng_tmpl = None
if ng_tmpl_id:
ng_tmpl = self.node_group_template_get(context, ng_tmpl_id)
self._cleanup_node_group(ng_tmpl)
node_group_merged.update(ng_tmpl)
node_group_merged.update(node_group)
if ng_tmpl:
node_group_merged['node_configs'] = configs.merge_configs(
ng_tmpl.get('node_configs'),
node_group.get('node_configs'))
return node_group_merged
# Cluster ops
def cluster_get(self, context, cluster):
"""Return the cluster or None if it does not exist."""
return self.db.cluster_get(context, cluster)
def cluster_get_all(self, context, **kwargs):
"""Get all clusters filtered by **kwargs.
e.g. cluster_get_all(plugin_name='vanilla', hadoop_version='1.1')
"""
return self.db.cluster_get_all(context, **kwargs)
def cluster_create(self, context, values):
"""Create a cluster from the values dictionary."""
# loading defaults
merged_values = copy.deepcopy(CLUSTER_DEFAULTS)
merged_values['tenant_id'] = context.tenant_id
private_key, public_key = crypto.generate_key_pair()
merged_values['management_private_key'] = private_key
merged_values['management_public_key'] = public_key
cluster_template_id = values.get('cluster_template_id')
c_tmpl = None
if cluster_template_id:
c_tmpl = self.cluster_template_get(context, cluster_template_id)
del c_tmpl['created_at']
del c_tmpl['updated_at']
del c_tmpl['id']
# updating with cluster_template values
merged_values.update(c_tmpl)
# updating with values provided in request
merged_values.update(values)
if c_tmpl:
merged_values['cluster_configs'] = configs.merge_configs(
c_tmpl.get('cluster_configs'),
values.get('cluster_configs'))
merged_values['node_groups'] = self._populate_node_groups(
context, merged_values)
return self.db.cluster_create(context, merged_values)
def cluster_update(self, context, cluster, values):
"""Set the given properties on cluster and update it."""
values = copy.deepcopy(values)
return self.db.cluster_update(context, cluster, values)
def cluster_destroy(self, context, cluster):
"""Destroy the cluster or raise if it does not exist."""
self.db.cluster_destroy(context, cluster)
# Node Group ops
def node_group_add(self, context, cluster, values):
"""Create a Node Group from the values dictionary."""
values = copy.deepcopy(values)
values = self._populate_node_group(context, values)
values['tenant_id'] = context.tenant_id
return self.db.node_group_add(context, cluster, values)
def node_group_update(self, context, node_group, values):
"""Set the given properties on node_group and update it."""
values = copy.deepcopy(values)
self.db.node_group_update(context, node_group, values)
    def node_group_remove(self, context, node_group):
        """Destroy the node_group or raise if it does not exist."""
        self.db.node_group_remove(context, node_group)
# Instance ops
def instance_add(self, context, node_group, values):
"""Create an Instance from the values dictionary."""
values = copy.deepcopy(values)
values = _apply_defaults(values, INSTANCE_DEFAULTS)
values['tenant_id'] = context.tenant_id
return self.db.instance_add(context, node_group, values)
def instance_update(self, context, instance, values):
"""Set the given properties on Instance and update it."""
values = copy.deepcopy(values)
self.db.instance_update(context, instance, values)
def instance_remove(self, context, instance):
"""Destroy the Instance or raise if it does not exist."""
self.db.instance_remove(context, instance)
# Volumes ops
def append_volume(self, context, instance, volume_id):
"""Append volume_id to instance."""
self.db.append_volume(context, instance, volume_id)
def remove_volume(self, context, instance, volume_id):
"""Remove volume_id in instance."""
self.db.remove_volume(context, instance, volume_id)
# Cluster Template ops
def cluster_template_get(self, context, cluster_template):
"""Return the cluster_template or None if it does not exist."""
return self.db.cluster_template_get(context, cluster_template)
def cluster_template_get_all(self, context):
"""Get all cluster_templates."""
return self.db.cluster_template_get_all(context)
def cluster_template_create(self, context, values):
"""Create a cluster_template from the values dictionary."""
values = copy.deepcopy(values)
values = _apply_defaults(values, CLUSTER_DEFAULTS)
values['tenant_id'] = context.tenant_id
values['node_groups'] = self._populate_node_groups(context, values)
return self.db.cluster_template_create(context, values)
def cluster_temp
|
JoKaWare/WTL-DUI
|
tools/grit/grit/node/message.py
|
Python
|
bsd-3-clause
| 10,287
| 0.009138
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Handling of the <message> element.
'''
import re
import types
from grit.node import base
import grit.format.rc_header
import grit.format.rc
from grit import clique
from grit import exception
from grit import lazy_re
from grit import tclib
from grit import util
BINARY, UTF8, UTF16 = range(3)
# Finds whitespace at the start and end of a string which can be multiline.
_WHITESPACE = lazy_re.compile('(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z',
re.DOTALL | re.MULTILINE)
class MessageNode(base.ContentNode):
'''A <message> element.'''
# For splitting a list of things that can be separated by commas or
# whitespace
_SPLIT_RE = lazy_re.compile('\s*,\s*|\s+')
def __init__(self):
super(type(self), self).__init__()
# Valid after EndParsing, this is the MessageClique that contains the
# source message and any translations of it that have been loaded.
self.clique = None
# We don't send leading and trailing whitespace into the translation
# console, but rather tack it onto the source message and any
# translations when formatting them into RC files or what have you.
self.ws_at_start = '' # Any whitespace characters at the start of the text
self.ws_at_end = '' # --"-- at the end of the text
# A list of "shortcut groups" this message is in. We check to make sure
# that shortcut keys (e.g. &J) within each shortcut group are unique.
self.shortcut_groups_ = []
def _IsValidChild(self, child):
return isinstance(child, (PhNode))
def _IsValidAttribute(self, name, value):
if name not in ['name', 'offset', 'translateable', 'desc', 'meaning',
                    'internal_comment', 'shortcut_groups', 'custom_type',
                    'validation_expr', 'use_name_for_id', 'sub_variable']:
return False
if (name in ('translateable', 'sub_variable') and
value not in ['true', 'false']):
return False
return True
def MandatoryAttributes(self):
return ['name|offset']
def DefaultAttributes(self):
return {
'custom_type' : '',
'desc' : '',
'internal_comment' : '',
'meaning' : '',
'shortcut_groups' : '',
      'sub_variable' : 'false',
'translateable' : 'true',
'use_name_for_id' : 'false',
'validation_expr' : '',
}
def GetTextualIds(self):
'''
Returns the concatenation of the parent's node first_id and
this node's offset if it has one, otherwise just call the
superclass' implementation
'''
if 'offset' in self.attrs:
# we search for the first grouping node in the parents' list
# to take care of the case where the first parent is an <if> node
grouping_parent = self.parent
import grit.node.empty
while grouping_parent and not isinstance(grouping_parent,
grit.node.empty.GroupingNode):
grouping_parent = grouping_parent.parent
assert 'first_id' in grouping_parent.attrs
return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']]
else:
return super(type(self), self).GetTextualIds()
def IsTranslateable(self):
return self.attrs['translateable'] == 'true'
def ItemFormatter(self, t):
# Only generate an output if the if condition is satisfied.
if not self.SatisfiesOutputCondition():
return super(type(self), self).ItemFormatter(t)
if t == 'rc_header':
return grit.format.rc_header.Item()
elif t in ('rc_all', 'rc_translateable', 'rc_nontranslateable'):
return grit.format.rc.Message()
elif t == 'c_format' and self.SatisfiesOutputCondition():
return grit.format.c_format.Message()
elif t == 'js_map_format':
return grit.format.js_map_format.Message()
else:
return super(type(self), self).ItemFormatter(t)
def EndParsing(self):
super(type(self), self).EndParsing()
# Make the text (including placeholder references) and list of placeholders,
# then strip and store leading and trailing whitespace and create the
# tclib.Message() and a clique to contain it.
text = ''
placeholders = []
for item in self.mixed_content:
if isinstance(item, types.StringTypes):
text += item
else:
presentation = item.attrs['name'].upper()
text += presentation
ex = ' '
if len(item.children):
ex = item.children[0].GetCdata()
original = item.GetCdata()
placeholders.append(tclib.Placeholder(presentation, original, ex))
m = _WHITESPACE.match(text)
if m:
self.ws_at_start = m.group('start')
self.ws_at_end = m.group('end')
text = m.group('body')
self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups'])
self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != '']
description_or_id = self.attrs['desc']
if description_or_id == '' and 'name' in self.attrs:
description_or_id = 'ID: %s' % self.attrs['name']
assigned_id = None
if (self.attrs['use_name_for_id'] == 'true' and
self.SatisfiesOutputCondition()):
assigned_id = self.attrs['name']
message = tclib.Message(text=text, placeholders=placeholders,
description=description_or_id,
meaning=self.attrs['meaning'],
assigned_id=assigned_id)
self.InstallMessage(message)
def InstallMessage(self, message):
'''Sets this node's clique from a tclib.Message instance.
Args:
message: A tclib.Message.
'''
self.clique = self.UberClique().MakeClique(message, self.IsTranslateable())
for group in self.shortcut_groups_:
self.clique.AddToShortcutGroup(group)
if self.attrs['custom_type'] != '':
self.clique.SetCustomType(util.NewClassInstance(self.attrs['custom_type'],
clique.CustomType))
elif self.attrs['validation_expr'] != '':
self.clique.SetCustomType(
clique.OneOffCustomType(self.attrs['validation_expr']))
def SubstituteMessages(self, substituter):
'''Applies substitution to this message.
Args:
substituter: a grit.util.Substituter object.
'''
message = substituter.SubstituteMessage(self.clique.GetMessage())
if message is not self.clique.GetMessage():
self.InstallMessage(message)
def GetCliques(self):
if self.clique:
return [self.clique]
else:
return []
def Translate(self, lang):
'''Returns a translated version of this message.
'''
assert self.clique
msg = self.clique.MessageForLanguage(lang,
self.PseudoIsAllowed(),
self.ShouldFallbackToEnglish()
).GetRealContent()
return msg.replace('[GRITLANGCODE]', lang)
def NameOrOffset(self):
if 'name' in self.attrs:
return self.attrs['name']
else:
return self.attrs['offset']
def ExpandVariables(self):
'''We always expand variables on Messages.'''
return True
def GetDataPackPair(self, lang, encoding):
'''Returns a (id, string) pair that represents the string id and the string
in utf8. This is used to generate the data pack data file.
'''
from grit.format import rc_header
id_map = rc_header.Item.tids_
id = id_map[self.GetTextualIds()[0]]
message = self.ws_at_start + self.Translate(lang) + self.ws_at_end
# |message| is a python unicode string, so convert to a byte stream that
# has the correct encoding requested for the datapacks. We skip the first
# 2 bytes of text resources because it is the BOM.
if encoding == UTF8:
return id, message.encode('utf8')
if encoding == UTF16:
return id, message.encode('utf16')[2:]
# Default is BINARY
return id, message
# static method
def Construct(parent, message, name, desc='', meaning
|
jarnoln/mitasny
|
tasks/admin.py
|
Python
|
mit
| 238
| 0
|
from django.contrib import admin
from tasks import models
admin.site.register(models.Project)
admin.site.register(models.Priority)
admin.site.register(models.TaskStatus)
admin.site.register(models.Phase)
admin.site.register(models.Task)
|
KaranToor/MA450
|
google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_exceptions.py
|
Python
|
apache-2.0
| 1,244
| 0.008039
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource execeptions."""
from googlecloudsdk.core import exceptions
class Error(exceptions.Error):
"""A bas
|
e exception for all recoverable resource errors => no stack trace."""
pass
class InternalError(exceptions.InternalError):
"""A base exception for all unrecoverable resource errors => stack trace."""
pass
class ExpressionSyntaxError(Error):
"""Resource expression syntax error."""
pass
class ResourceRegistryAttributeError(exceptions.InternalError):
"""Missing or invalid resource registry attribute error."""
pass
class UnregisteredCollectionError(Error):
"""Unregistered resource collection error."""
pass
|
willb/wallaroo
|
clients/python-wallaroo/wallaroo/client/node.py
|
Python
|
apache-2.0
| 5,705
| 0.009465
|
# Copyright (c) 2013 Red Hat, Inc.
# Author: William Benton (willb@redhat.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from proxy import Proxy, proxied_attr
from proxy import proxied_attr_get as pag, proxied_attr_set as pas, proxied_attr_getset as pags
from arc_utils import arcmethod, uniq
from singleton import v as store_singleton
import errors
from errors import not_implemented, fail
from constants import PARTITION_GROUP, LABEL_SENTINEL_PARAM, LABEL_SENTINEL_PARAM_ATTR
from datetime import datetime
import calendar
import urllib
def ts():
now = datetime.utcnow()
return (calendar.timegm(now.utctimetuple()) * 1000000) + now.microsecond
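# Note (added): ts() above returns the current UTC time as an integer count of
# microseconds since the Unix epoch (whole seconds from timegm() scaled by
# 1e6, plus the sub-second microsecond component).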
class node(Proxy):
    name = property(pag("name"))
memberships = property(*pags("memberships"))
identity_group = property(lambda self : self.cm.make_proxy_object("group", self.attr_vals["identity_group"], refresh=True))
provisioned = property(*pags("provisioned"))
last_updated_version = property(pag("last_updated_version"))
modifyMemberships = arcmethod(pag("memberships"), pas("memberships"), heterogeneous=True, preserve_order=True)
def getConfig(self, **options):
if options.has_key("version"):
return self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":options["version"]}, {})
return self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name))
def makeProvisioned(self):
self.provisioned = True
self.update()
def explain(self):
not_implemented()
def checkin(self):
metapath = "/meta/node/%s" % self.name
# now = datetime.utcnow().isoformat()
now = ts()
meta = self.cm.fetch_json_resource(metapath, False, default={})
meta["last-checkin"] = now
self.cm.put_json_resource(metapath, meta, False)
return now
def last_checkin(self):
metapath = "/meta/node/%s" % self.name
meta = self.cm.fetch_json_resource(metapath, False, default={})
return meta.has_key("last-checkin") and meta["last-checkin"] or 0
def whatChanged(self, old, new):
oc = self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":old}, {})
nc = self.cm.fetch_json_resource("/config/node/%s" % urllib.quote_plus(self.name), {"commit":new}, {})
ock = set(oc)
nck = set(nc)
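        # (Added comment) params: keys added, removed, or changed in value
        # between the two commits, excluding the bookkeeping version marker.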
params = set([p for p in (ock | nck) if p not in ock or p not in nck or oc[p] != nc[p]]) - set(["WALLABY_CONFIG_VERSION"])
mc_params = set([p for p in params if store_singleton().getParam(p).must_change])
subsystems = [store_singleton().getSubsys(sub) for sub in self.cm.list_objects("subsystem")]
restart, reconfig = [], []
for ss in subsystems:
            ss.refresh()
ssp = set(ss.parameters)
if ssp.intersection(mc_params):
restart.append(ss.name)
elif ssp.intersection(params):
reconfig.append(ss.name)
return [list(params), restart, reconfig]
# labeling support below
def getLabels(self):
memberships = self.memberships
if not PARTITION_GROUP in memberships:
return []
else:
partition = memberships.index(PARTITION_GROUP)
return memberships[partition+1:]
labels=property(getLabels)
def modifyLabels(self, op, labels, **options):
thestore = store_singleton()
memberships = self.memberships
current_labels = self.getLabels()
label_set = set(current_labels + [PARTITION_GROUP])
new_labels = []
if op == "ADD":
new_labels = current_labels + labels
pass
elif op == "REPLACE":
new_labels = labels
pass
elif op == "REMOVE":
new_labels = [label for label in current_labels if label not in labels]
else:
raise NotImplementedError("modifyLabels: operation " + op + " not understood")
just_memberships = [grp for grp in memberships if grp not in label_set]
new_memberships = uniq(just_memberships + [PARTITION_GROUP] + new_labels)
if "ensure_partition_group" in options and options["ensure_partition_group"] is not False:
if thestore is None:
raise RuntimeError("store singleton must be initialized before using the ensure_partition_group option")
thestore.getPartitionGroup()
if "create_missing_labels" in options and options["create_missing_labels"] is not False:
if thestore is None:
raise RuntimeError("store singleton must be initialized before using the create_missing_labels option")
for missing_label in thestore.checkGroupValidity(new_labels):
thestore.addLabel(missing_label)
return self.modifyMemberships("REPLACE", new_memberships, {})
proxied_attr(node, "name")
proxied_attr(node, "memberships")
proxied_attr(node, "identity_group")
proxied_attr(node, "provisioned")
|
MisterPup/Ceilometer-Juno-Extension
|
ceilometer/alarm/evaluator/threshold.py
|
Python
|
apache-2.0
| 8,086
| 0
|
#
# Copyright 2013 Red Hat, Inc
#
# Author: Eoghan Glynn <eglynn@redhat.com>
# Author: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import operator
from oslo.utils import timeutils
from ceilometer.alarm import evaluator
from ceilometer.alarm.evaluator import utils
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
COMPARATORS = {
'gt': operator.gt,
'lt': operator.lt,
'ge': operator.ge,
'le': operator.le,
'eq': operator.eq,
'ne': operator.ne,
}
class ThresholdEvaluator(evaluator.Evaluator):
# the sliding evaluation window is extended to allow
# for reporting/ingestion lag
look_back = 1
# minimum number of datapoints within sliding window to
# avoid unknown state
quorum = 1
@classmethod
def _bound_duration(cls, alarm, constraints):
"""Bound the duration of the statistics query."""
now = timeutils.utcnow()
# when exclusion of weak datapoints is enabled, we extend
# the look-back period so as to allow a clearer sample count
# trend to be established
look_back = (cls.look_back if not alarm.rule.get('exclude_outliers')
else alarm.rule['evaluation_periods'])
window = (alarm.rule['period'] *
(alarm.rule['evaluation_periods'] + look_back))
start = now - datetime.timedelta(seconds=window)
LOG.debug(_('query stats from %(start)s to '
'%(now)s') % {'start': start, 'now': now})
after = dict(field='timestamp', op='ge', value=start.isoformat())
before = dict(field='timestamp', op='le', value=now.isoformat())
constraints.extend([before, after])
return constraints
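    # Worked example (added note): with period=60s, evaluation_periods=3 and
    # the default look_back=1, the query window spans 60 * (3 + 1) = 240
    # seconds ending at "now"; with exclude_outliers set, look_back widens to
    # the full evaluation_periods.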
@staticmethod
def _sanitize(alarm, statistics):
"""Sanitize statistics."""
LOG.debug(_('sanitize stats %s') % statistics)
if alarm.rule.get('exclude_outliers'):
key = operator.attrgetter('count')
mean = utils.mean(statistics, key)
stddev = utils.stddev(statistics, key, mean)
lower = mean - 2 * stddev
upper = mean + 2 * stddev
inliers, outliers = utils.anomalies(statistics, key, lower, upper)
if outliers:
LOG.debug(_('excluded weak datapoints with sample counts %s'),
[s.count for s in outliers])
statistics = inliers
else:
LOG.debug('no excluded weak datapoints')
# in practice statistics are always sorted by period start, not
# strictly required by the API though
statistics = statistics[-alarm.rule['evaluation_periods']:]
LOG.debug(_('pruned statistics to %d') % len(statistics))
return statistics
def _statistics(self, alarm, query):
"""Retrieve statistics over the current window."""
LOG.debug(_('stats query %s') % query)
try:
return self._client.statistics.list(
meter_name=alarm.rule['meter_name'], q=query,
period=alarm.rule['period'])
except Exception:
            LOG.exception(_('alarm stats retrieval failed'))
return []
def _sufficient(self, alarm, statistics):
"""Check for the sufficiency of the data for evaluation.
        Ensure there is sufficient data for evaluation, transitioning to
unknown otherwise.
"""
sufficient = len(statistics) >= self.quorum
if not sufficient and alarm.state != evaluator.UNKNOWN:
reason = _('%d datapoints are unknown') % alarm.rule[
'evaluation_periods']
reason_data = self._reason_data('unknown',
alarm.rule['evaluation_periods'],
None)
self._refresh(alarm, evaluator.UNKNOWN, reason, reason_data)
return sufficient
@staticmethod
def _reason_data(disposition, count, most_recent):
"""Create a reason data dictionary for this evaluator type."""
return {'type': 'threshold', 'disposition': disposition,
'count': count, 'most_recent': most_recent}
@classmethod
def _reason(cls, alarm, statistics, distilled, state):
"""Fabricate reason string."""
count = len(statistics)
disposition = 'inside' if state == evaluator.OK else 'outside'
last = getattr(statistics[-1], alarm.rule['statistic'])
transition = alarm.state != state
reason_data = cls._reason_data(disposition, count, last)
if transition:
return (_('Transition to %(state)s due to %(count)d samples'
' %(disposition)s threshold, most recent:'
' %(most_recent)s')
% dict(reason_data, state=state)), reason_data
return (_('Remaining as %(state)s due to %(count)d samples'
' %(disposition)s threshold, most recent: %(most_recent)s')
% dict(reason_data, state=state)), reason_data
def _transition(self, alarm, statistics, compared):
"""Transition alarm state if necessary.
The transition rules are currently hardcoded as:
- transitioning from a known state requires an unequivocal
set of datapoints
- transitioning from unknown is on the basis of the most
recent datapoint if equivocal
Ultimately this will be policy-driven.
"""
distilled = all(compared)
unequivocal = distilled or not any(compared)
unknown = alarm.state == evaluator.UNKNOWN
continuous = alarm.repeat_actions
if unequivocal:
state = evaluator.ALARM if distilled else evaluator.OK
reason, reason_data = self._reason(alarm, statistics,
distilled, state)
if alarm.state != state or continuous:
self._refresh(alarm, state, reason, reason_data)
elif unknown or continuous:
trending_state = evaluator.ALARM if compared[-1] else evaluator.OK
state = trending_state if unknown else alarm.state
reason, reason_data = self._reason(alarm, statistics,
distilled, state)
self._refresh(alarm, state, reason, reason_data)
def evaluate(self, alarm):
if not self.within_time_constraint(alarm):
LOG.debug(_('Attempted to evaluate alarm %s, but it is not '
'within its time constraint.') % alarm.alarm_id)
return
query = self._bound_duration(
alarm,
alarm.rule['query']
)
statistics = self._sanitize(
alarm,
self._statistics(alarm, query)
)
if self._sufficient(alarm, statistics):
def _compare(stat):
op = COMPARATORS[alarm.rule['comparison_operator']]
value = getattr(stat, alarm.rule['statistic'])
limit = alarm.rule['threshold']
LOG.debug(_('comparing value %(value)s against threshold'
' %(limit)s') %
{'value': value, 'limit': limit})
return op(value, limit)
self._transition(alarm,
statistics,
map(_compare, statistics))
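# Illustrative sketch (assumption: COMPARATORS, defined earlier in this
# module, maps operator names to stdlib functions), e.g.:
#     import operator
#     COMPARATORS = {'gt': operator.gt, 'lt': operator.lt,
#                    'ge': operator.ge, 'le': operator.le,
#                    'eq': operator.eq, 'ne': operator.ne}
# A rule {'comparison_operator': 'gt', 'statistic': 'avg', 'threshold': 80.0}
# thus maps each statistic to op(getattr(stat, 'avg'), 80.0), and
# _transition() folds the resulting booleans into an alarm state.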
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/tpgamefiles/rules/d20_actions/action02603_feat_divine_spell_power.py
|
Python
|
mit
| 346
| 0.040462
|
from toee import *
import tpactions
def GetActionName():
return "Divine Spell Power"
def GetActionDefinitionFlags():
return D20ADF_None
def GetTargetingClassification():
return D20TC_Target0
def GetActionCostType():
    return D20ACT_NULL
def AddToSequence(d20action, action_seq, tb_status):
action_seq.add_action(d20action)
return AEC_OK
|
commonsmachinery/commonshasher
|
config.py
|
Python
|
gpl-3.0
| 246
| 0.004065
|
WMC_RATE_LIMIT = 5
WMC_USER = ''
WMC_PASSWORD = ''
BLOCKHASH_COMMAND = 'blockhash'
SQLALCHEMY_URL = 'postgresql://user:pass@localhost/test'
BROKER_URL = 'amqp://guest@localhost/'
try:
from config_local import *
except ImportError:
    pass
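# Usage sketch of the override pattern above: site-specific values go in an
# untracked config_local.py next to this file, e.g.:
#     # config_local.py
#     WMC_USER = 'my-bot'
#     WMC_PASSWORD = 'secret'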
|
enritoomey/DiagramaDeRafagasyManiobras
|
diagramas_class.py
|
Python
|
mit
| 20,658
| 0.004211
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
import argparse
import json
class Diagramas(object):
def __init__(self, datos, w, h, den, units='SI'):
self.CAM = datos["CAM"]
self.sw = datos["sw"]
self.a3D = datos["a3D"]
self.MTOW = datos["MTOW"]
self.MLW = datos["MLW"]
self.MZFW = datos["MZFW"]
self.Vc = datos["Vc"]
self.clmax = datos["clmax"]
self.clmax_flap = datos["clmax_flap"]
self.clmin = datos["clmin"]
self.Zmo = datos["Zmo"]
self.W = w
self.h = h
self.den = den
self.units = units
self.carga_alar = {}
self.H = {}
self.Vs1 = {}
self.Vs0 = {}
self.Vsf = {}
self.Vd = {}
self.Va = {}
self.Vf = {}
self.Vf_n2 = {}
self.Vb = {}
self.Uref = {}
self.Uds = self.U = {}
self.Ude_25fts = {}
self.Ude_50fts = {}
self.Ude_60fts = {}
self.vel_label = {'IM': 'ft/s', 'SI': 'm/s'}
        # fixed constants:
self.ft2m = 0.3048
self.lb2kg = 0.453592
self.cte_fgz = {'IM': 250000}
self.cte_fgz['SI'] = self.cte_fgz['IM'] * self.ft2m
self.s = {'IM': 100.015}
self.s['SI'] = self.s['IM'] * self.ft2m
self.gravedad = {'SI': 9.81}
self.gravedad['IM'] = self.gravedad['SI'] / self.ft2m
self.cte_nmax_1 = {'IM': 24000}
self.cte_nmax_1['SI'] = self.cte_nmax_1['IM'] * self.lb2kg
self.cte_nmax_2 = {'IM': 10000}
        self.cte_nmax_2['SI'] = self.cte_nmax_2['IM'] * self.lb2kg
self.cte_Uref_h1 = {'IM': 15000}
self.cte_Uref_h1['SI'] = self.cte_Uref_h1['IM'] * self.ft2m
self.cte_Uref_h2 = {'IM': 50000}
self.cte_Uref_h2['SI'] = self.cte_Uref_h2['IM'] * self.ft2m
self.cte_Uref_v1 = {'IM': 56}
self.cte_Uref_v1['SI'] = self.cte_Uref_v1['IM'] * self.ft2m
self.cte_Uref_v2 = {'IM': 56}
self.cte_Uref_v2['SI'] = self.cte_Uref_v2['IM'] * self.ft2m
self.cte_Uref_v3 = {'IM': 26}
self.cte_Uref_v3['SI'] = self.cte_Uref_v3['IM'] * self.ft2m
        # This constant exists because the slope a_cn = dCn/dalpha must be used instead of a_cl = dCl/dalpha, though where the value comes from is unclear
self.ad_CN = 0.59248
self.cte_Vb = {'IM': 498.0} # lb/s**2
self.cte_Vb['SI'] = self.cte_Vb['IM'] * self.ft2m ** 4 / self.lb2kg
        # Gust velocities
self.cte_Ude_h1 = {'IM': 20000}
self.cte_Ude_h1['SI'] = self.cte_Ude_h1['IM'] * self.ft2m
self.cte_Ude_h2 = {'IM': 50000}
self.cte_Ude_h2['SI'] = self.cte_Ude_h2['IM'] * self.ft2m
self.cte_25fts_v1 = {'IM': 25}
self.cte_25fts_v1['SI'] = self.cte_25fts_v1['IM'] * self.ft2m
self.cte_25fts_v2 = {'IM': 33.34}
self.cte_25fts_v2['SI'] = self.cte_25fts_v2['IM'] * self.ft2m
self.cte_25fts_m2 = 0.000417
self.cte_25fts_v3 = {'IM': 12.5}
self.cte_25fts_v3['SI'] = self.cte_25fts_v3['IM'] * self.ft2m
self.cte_50fts_v1 = {'IM': 50}
self.cte_50fts_v1['SI'] = self.cte_50fts_v1['IM'] * self.ft2m
self.cte_50fts_v2 = {'IM': 66.77}
self.cte_50fts_v2['SI'] = self.cte_50fts_v2['IM'] * self.ft2m
self.cte_50fts_m2 = 0.0008933
self.cte_50fts_v3 = {'IM': 25}
self.cte_50fts_v3['SI'] = self.cte_50fts_v3['IM'] * self.ft2m
self.cte_60fts_v1 = {'IM': 60}
self.cte_60fts_v1['SI'] = self.cte_60fts_v1['IM'] * self.ft2m
self.cte_60fts_v2 = {'IM': 60}
self.cte_60fts_v2['SI'] = self.cte_60fts_v2['IM'] * self.ft2m
self.cte_60fts_m2 = {'IM': 18}
self.cte_60fts_m2['SI'] = self.cte_60fts_m2['IM'] * self.ft2m
self.cte_60fts_v3 = {'IM': 38}
self.cte_60fts_v3['SI'] = self.cte_60fts_v3['IM'] * self.ft2m
        # constants related to the gust diagram
self.R1 = self.MLW[units] / self.MTOW[units]
self.R2 = self.MZFW[units] / self.MTOW[units]
self.fgm = np.sqrt(self.R2 * np.tan(np.pi * self.R1 / 4.0))
self.fgz = 1 - self.Zmo[units] / self.cte_fgz[units]
        self.fg = 0.5 * (self.fgz + self.fgm)
def calculos(self):
        self.R1 = self.MLW[self.units] / self.MTOW[self.units]
self.R2 = self.MZFW[self.units] / self.MTOW[self.units]
self.fgm = np.sqrt(self.R2 * np.tan(np.pi * self.R1 / 4.0))
self.fgz = 1 - self.Zmo[self.units] / self.cte_fgz[self.units]
self.fg = 0.5 * (self.fgz + self.fgm)
self.carga_alar[self.units] = self.W[self.units] / self.sw[self.units]
self.mu_g = 2 * self.carga_alar[self.units] / (self.den[self.units] * self.CAM[self.units] * self.a3D) # *gravedad[units])
self.Kg = 0.88 * (self.mu_g / (5.3 + self.mu_g))
self.Vs1[self.units] = np.sqrt((self.carga_alar[self.units] * self.gravedad[self.units]) / (0.5 * self.den[self.units] * self.clmax))
self.Vs0[self.units] = np.sqrt((-self.carga_alar[self.units] * self.gravedad[self.units]) / (0.5 * self.den[self.units] * self.clmin))
self.Vsf[self.units] = np.sqrt((self.carga_alar[self.units] * self.gravedad[self.units]) / (0.5 * self.den[self.units] * self.clmax_flap))
# Calculo de n_max
self.n_max = 2.1 + self.cte_nmax_1[self.units] / (self.MTOW[self.units] + self.cte_nmax_2[self.units])
if self.n_max < 2.5:
self.n_max = 2.5
elif self.n_max > 3.8:
self.n_max = 3.8
self.Va[self.units] = self.Vs1[self.units] * np.sqrt(self.n_max)
if self.Va[self.units] > self.Vc[self.units]:
self.Va[self.units] = self.Vc[self.units]
self.Vd[self.units] = self.Vc[self.units] / 0.85
self.Vf[self.units] = max(self.Vs1[self.units] * 1.6, self.Vsf[self.units] * 1.8)
if self.h[self.units] < self.cte_Uref_h1[self.units]:
self.Uref[self.units] = self.cte_Uref_v1[self.units] - 12.0 * self.h[self.units] / self.cte_Uref_h1[self.units]
elif self.h[self.units] < self.cte_Uref_h2[self.units]:
self.Uref[self.units] = self.cte_Uref_v2[self.units] - 18.0 * (self.h[self.units] - self.cte_Uref_h1[self.units]) / \
(self.cte_Uref_h2[self.units] - self.cte_Uref_h1[self.units])
else:
self.Uref[self.units] = self.cte_Uref_v3[self.units]
self.Vb[self.units] = min(self.Vc[self.units], self.Vs1[self.units] * np.sqrt(1 + self.Kg * self.Uref[self.units] * self.Vc[self.units] *
self.a3D * self.ad_CN / (self.cte_Vb[self.units] * self.carga_alar[self.units])))
if self.h[self.units] < self.cte_Ude_h1[self.units]:
self.Ude_25fts[self.units] = self.cte_25fts_v1[self.units]
self.Ude_50fts[self.units] = self.cte_50fts_v1[self.units]
self.Ude_60fts[self.units] = self.cte_60fts_v1[self.units]
elif self.h[self.units] < self.cte_Ude_h2[self.units]:
self.Ude_25fts[self.units] = self.cte_25fts_v2[self.units] - self.cte_25fts_m2 * self.h[self.units]
self.Ude_50fts[self.units] = self.cte_50fts_v2[self.units] - self.cte_50fts_m2 * self.h[self.units]
self.Ude_60fts[self.units] = self.cte_60fts_v2[self.units] - self.cte_60fts_m2[self.units] * \
(self.h[self.units] - self.cte_Ude_h1[self.units]) \
/(self.cte_Ude_h2[self.units] - self.cte_Ude_h1[self.units])
else:
self.Ude_25fts[self.units] = self.cte_25fts_v3[self.units]
self.Ude_50fts[self.units] = self.cte_50fts_v3[self.units]
self.Ude_60fts[self.units] = self.cte_60fts_v3[self.units]
self.Vf_n2[self.units] = np.sqrt(2 * self.W[self.units] * self.gravedad[self.units] / (0.5 * self.den[self.units] * self.clmax_flap * self.sw[self.units]))
def n_25fts(self, vel):
return self.fg * self.Ude_25fts[self.units] * self.a3D * self.ad_CN * vel / (self.cte_Vb[self.units] * self.carga_alar[self.units])
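# Usage sketch (hypothetical values; every aircraft datum that the class
# indexes by unit system is given as a dict keyed by 'SI'):
#     datos = {"CAM": {"SI": 2.1}, "sw": {"SI": 30.0}, "a3D": 5.0,
#              "MTOW": {"SI": 12000.0}, "MLW": {"SI": 11000.0},
#              "MZFW": {"SI": 10000.0}, "Vc": {"SI": 120.0},
#              "clmax": 1.5, "clmax_flap": 2.0, "clmin": -0.8,
#              "Zmo": {"SI": 9000.0}}
#     d = Diagramas(datos, w={"SI": 11000.0}, h={"SI": 3000.0},
#                   den={"SI": 0.909}, units='SI')
#     d.calculos()
#     print(d.n_max, d.Va['SI'], d.Vb['SI'])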
|
Glandos/FreeMobile-SMS
|
config.sample.py
|
Python
|
mit
| 82
| 0
|
# -*- coding: utf-8 -*-
users = {
    'Jean-jacques': ('12345678',
                     'api-key'),
}
|
safwanrahman/kitsune
|
kitsune/products/tests/test_templates.py
|
Python
|
bsd-3-clause
| 6,625
| 0.001057
|
from django.conf import settings
from django.core.cache import cache
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.products.models import HOT_TOPIC_SLUG
from kitsune.products.tests import ProductFactory, TopicFactory
from kitsune.questions.models import QuestionLocale
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.tests import DocumentFactory, ApprovedRevisionFactory, HelpfulVoteFactory
class ProductViewsTestCase(ElasticTestCase):
def test_products(self):
"""Verify that /products page renders products."""
# Create some products.
for i in range(3):
p = ProductFactory(visible=True)
l = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(l)
# GET the products page and verify the content.
r = self.client.get(reverse('products'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(3, len(doc('#products-and-services li')))
def test_product_landing(self):
"""Verify that /products/<slug> page renders topics."""
# Create a product.
p = ProductFactory()
l = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(l)
# Create some topics.
TopicFactory(slug=HOT_TOPIC_SLUG, product=p, visible=True)
topics = TopicFactory.create_batch(11, product=p, visible=True)
# Create a document and assign the product and 10 topics.
d = DocumentFactory(products=[p], topics=topics[:10])
ApprovedRevisionFactory(document=d)
self.refresh()
# GET the product landing page and verify the content.
url = reverse('products.product', args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(11, len(doc('#help-topics li')))
eq_(p.slug, doc('#support-search input[name=product]').attr['value'])
def test_firefox_product_landing(self):
"""Verify that there are no firefox button at header in the firefox landing page"""
p = ProductFactory(slug="firefox")
url = reverse('products.product', args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(False, doc(".firefox-download-button").length)
def test_document_listing(self):
"""Verify /products/<product slug>/<topic slug> renders articles."""
# Create a topic and product.
p = ProductFactory()
t1 = TopicFactory(product=p)
# Create 3 documents with the topic and product and one without.
ApprovedRevisionFactory.create_batch(3, document__products=[p], document__topics=[t1])
ApprovedRevisionFactory()
self.refresh()
# GET the page and verify the content.
url = reverse('products.documents', args=[p.slug, t1.slug])
r = self.client.get(url, follow=True)
        eq_(200, r.status_code)
        doc = pq(r.content)
eq_(3, len(doc('#document-list > ul > li')))
eq_(p.slug, doc('#support-search input[name=product]').attr['value'])
def test_document_listing_order(self):
"""Verify documents are sorted by display_order and number of helpful votes."""
# Create topic, product and documents.
p = ProductFactory()
t = TopicFactory(product=p)
docs = []
# FIXME: Can't we do this with create_batch and build the document
# in the approvedrevisionfactory
for i in range(3):
doc = DocumentFactory(products=[p], topics=[t])
ApprovedRevisionFactory(document=doc)
docs.append(doc)
# Add a lower display order to the second document. It should be first now.
docs[1].display_order = 0
docs[1].save()
self.refresh()
url = reverse('products.documents', args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc('#document-list > ul > li:first-child > a').text(),
docs[1].title)
# Add a helpful vote to the third document. It should be second now.
rev = docs[2].current_revision
HelpfulVoteFactory(revision=rev, helpful=True)
docs[2].save() # Votes don't trigger a reindex.
self.refresh()
cache.clear() # documents_for() is cached
url = reverse('products.documents', args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc('#document-list > ul > li:nth-child(2) > a').text(),
docs[2].title)
# Add 2 helpful votes the first document. It should be second now.
rev = docs[0].current_revision
HelpfulVoteFactory(revision=rev, helpful=True)
HelpfulVoteFactory(revision=rev, helpful=True)
docs[0].save() # Votes don't trigger a reindex.
self.refresh()
cache.clear() # documents_for() is cached
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc('#document-list > ul > li:nth-child(2) > a').text(),
docs[0].title)
def test_subtopics(self):
"""Verifies subtopics appear on document listing page."""
# Create a topic and product.
p = ProductFactory()
t = TopicFactory(product=p, visible=True)
# Create a documents with the topic and product
doc = DocumentFactory(products=[p], topics=[t])
ApprovedRevisionFactory(document=doc)
self.refresh()
# GET the page and verify no subtopics yet.
url = reverse('products.documents', args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(0, len(pqdoc('li.subtopic')))
# Create a subtopic, it still shouldn't show up because no
# articles are assigned.
subtopic = TopicFactory(parent=t, product=p, visible=True)
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(0, len(pqdoc('li.subtopic')))
# Add a document to the subtopic, now it should appear.
doc.topics.add(subtopic)
self.refresh()
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(1, len(pqdoc('li.subtopic')))
|
ifduyue/sentry
|
src/sentry/api/exceptions.py
|
Python
|
bsd-3-clause
| 2,003
| 0.000499
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.exceptions import APIException
class ResourceDoesNotExist(APIException):
status_code = status.HTTP_404_NOT_FOUND
class SentryAPIException(APIException):
code = ''
message = ''
def __init__(self, code=None, message=None, detail=None, **kwargs):
if detail is None:
detail = {
'code': code or self.code,
'message': message or self.message,
'extra': kwargs,
}
super(SentryAPIException, self).__init__(detail=detail)
class ProjectMoved(SentryAPIException):
status_code = status.HTTP_302_FOUND
    # code/message currently don't get used
code = 'resource-moved'
message = 'Resource has been moved'
def __init__(self, new_url, slug):
super(ProjectMoved, self).__init__(
url=new_url,
slug=slug,
)
class SsoRequired(SentryAPIException):
status_code = status.HTTP_401_UNAUTHORIZED
code = 'sso-required'
message = 'Must login via SSO'
def __init__(self, organization):
super(SsoRequired, self).__init__(
loginUrl=reverse('sentry-auth-organization', args=[organization.slug])
)
class SuperuserRequired(SentryAPIException):
status_code = status.HTTP_403_FORBIDDEN
code = 'superuser-required'
message = 'You need to re-authenticate for superuser.'
class SudoRequired(SentryAPIException):
status_code = status.HTTP_401_UNAUTHORIZED
code = 'sudo-required'
message = 'Account verification required.'
def __init__(self, user):
super(SudoRequired, self).__init__(username=user.username)
class TwoFactorRequired(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
code = '2fa-required'
message = 'Organization requires two-factor authentication to be enabled'
class InvalidRepository(Exception):
pass
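# Usage sketch (illustrative values): raising one of these inside a DRF view
# returns a structured error body, e.g.:
#     raise ProjectMoved('https://new-host.example/project-slug', 'project-slug')
# responds with HTTP 302 and detail == {
#     'code': 'resource-moved', 'message': 'Resource has been moved',
#     'extra': {'url': 'https://new-host.example/project-slug',
#               'slug': 'project-slug'}}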
|
Team4819/Yeti
|
tests/resources/engine/module2.py
|
Python
|
bsd-3-clause
| 96
| 0.010417
|
import yeti
class ModuleUno(yeti.Module):
def module_init(self):
        raise Exception()
|
JaeGyu/PythonEx_1
|
MatplotlibEx.py
|
Python
|
mit
| 116
| 0.051724
|
import matplotlib.pyplot as pl
import random as rnd
a = rnd.sample(range(10),10)
print([a])
pl.imshow([a])
|
genokan/Python-Learning
|
scraper.py
|
Python
|
gpl-2.0
| 443
| 0.031603
|
import time
import urllib2
from urllib2 import urlopen
import datetime
from sys import argv
website = argv[1]
topSplit = '<div class=\"post\">'
bottomSplit = '<div class=\"sp_right\">'
def main():
try:
sourceCode = urllib2.urlopen(website).read()
#print sourceCode
sourceSplit = sourceCode.split(topSplit)[1].split(bottomSplit)[0]
print sourceSplit
except Exception,e:
print 'failed in the main loop'
print str(e)
main()
|
kjwilcox/digital_heist
|
src/tile.py
|
Python
|
gpl-2.0
| 2,393
| 0.006268
|
#!/usr/bin/python3
import exhibition
from data import TILE_SIZE
import pygame
import collections
import logging
log = logging.getLogger(__name__)
DEBUG_RENDER_COORDS = True
class Tile:
""" A tile represents one tile in an area's map.
It has an image, a position rectangle, and an optional collision rectangle.
An abstract base class. Child classes must define an image."""
def __init__(self, pos):
""" Initializes a tile with position. No image or collision rect set. """
self.tile_pos = pos
x, y = pos
self.rect = pygame.Rect(x * TILE_SIZE, y * TILE_SIZE, TILE_SIZE, TILE_SIZE)
self.collision_rect = None
self.image = None
if DEBUG_RENDER_COORDS:
font = pygame.font.Font(None, 24)
self.coord_text = font.render("({}, {})".format(self.tile_pos[0], self.tile_pos[1]), True, (0, 0, 0, 100))
def render(self, camera):
""" Renders the map tile to the screen using the provided camera. """
screen = pygame.display.get_surface()
        pos = camera.world_to_screen(self.rect.topleft)
screen.blit(self.image, pos)
if DEBUG_RENDER_COORDS:
x, y = pos
screen.blit(self.coord_text, (x + 4, y + 4))
##################################
class FloorTile(Tile):
def __init__(self, pos):
super().__init__(pos)
self.image = exhibition.images()["floor"]
class WallTile(Tile):
def __init__(self, pos):
super().__init__(pos)
self.collision_rect = self.rect
self.image = exhibition.images()["wall"]
class MissingTile(Tile):
def __init__(self, pos):
super().__init__(pos)
self.image = exhibition.images()["missing"]
log.error("Missing tile created at {}, {}".format(pos[0], pos[1]))
class VerticalDoorTile(Tile):
def __init__(self, pos):
super().__init__(pos)
self.image = exhibition.images()["vwalldoor"]
self.collision_rect = pygame.Rect(self.rect)
self.collision_rect.width /= 8
self.collision_rect.center = self.rect.center
# This tile mapping maps the integers in the map file format to the appropriate tile types.
# This dictionary IS the file format for the map.
tile_mapping = collections.defaultdict(MissingTile, {0: FloorTile, 1: WallTile, 2: VerticalDoorTile})
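# Usage sketch (hypothetical map data): a loader can instantiate tiles by
# indexing tile_mapping with each cell of an integer grid:
#     grid = [[1, 1, 1],
#             [1, 0, 2]]
#     tiles = [tile_mapping[n]((x, y))
#              for y, row in enumerate(grid)
#              for x, n in enumerate(row)]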
|
KAMI911/histogrammer
|
libs/timing.py
|
Python
|
mpl-2.0
| 609
| 0
|
# -*- coding: cp1250 -*-
try:
import datetime
import time
except ImportError as err:
print("Error import module: " + str(err))
exit(128)
__author__ = 'kszalai'
class Timing:
def __init__(self):
self.start = time.clock()
def end(self):
elapsed = time.clock() - self.start
# return self.__seconds_to_str(elapsed)
return str(datetime.timedelta(seconds=elapsed))
def __seconds_to_str(self, t):
return "%d:%02d:%02
|
d.%03d" % \
reduce(lambda ll, b: divmod(ll[0], b) + ll[1:],
[(t * 1000,), 1000, 60, 60])
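# Usage sketch:
#     t = Timing()
#     ...measured work...
#     print(t.end())   # e.g. '0:00:01.234567'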
|
Autostew/autostew
|
autostew_web_session/urls.py
|
Python
|
agpl-3.0
| 1,673
| 0.004782
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from autostew_web_session.models.models import Track
from autostew_web_session.models.session import SessionSetup, Session
from autostew_web_session.models.server import Server
from autostew_web_session.views import ParticipantDetailView, SessionList, TrackDetailView, SessionView
from . import views
app_name = 'session'
urlpatterns = [
url(r'^tracks/?$', ListView.as_view(model=Track), name='tracks'),
    url(r'^tracks/(?P<pk>[0-9]+)/?$', TrackDetailView.as_view(), name='track'),
url(r'^list/?$', SessionList.as_view(), name='sessions'),
url(r'^servers/?$', ListView.as_view(model=Server), name='servers'),
    url(r'^servers/(?P<slug>.+)/?$', DetailView.as_view(model=Server, slug_field='name'), name='server'),
url(r'^(?P<pk>[0-9]+)/?$', SessionView.as_view(), name='session'),
url(r'^(?P<pk>[0-9]+)/events/?$', views.SessionEvents.as_view(), name='events'),
url(r'^(?P<session_id>[0-9]+)/participant/(?P<participant_id>[0-9]+)/?$', ParticipantDetailView.as_view(), name='participant'),
url(r'^snapshot/(?P<pk>[0-9]+)/?$', SessionView.as_view(), name='snapshot'),
url(r'^setup/create/?$', login_required(views.CreateSessionView.as_view()), name='create_setup'),
url(r'^setup/list/?$', ListView.as_view(model=SessionSetup, queryset=SessionSetup.objects.filter(is_template=True)), name='setups'),
url(r'^setup/(?P<pk>[0-9]+)/?$', DetailView.as_view(model=SessionSetup), name='setup'),
]
|
agry/NGECore2
|
scripts/mobiles/endor/frenzied_donkuwah.py
|
Python
|
lgpl-3.0
| 1,767
| 0.032258
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
	mobileTemplate.setCreatureName('frenzied_donkuwah')
mobileTemplate.setLevel(78)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(3)
mobileTemplate.setMaxSpawnDistance(5)
mobileTemplate.setDeathblow(True)
mobileTemplate.setSocialGroup('donkuwah tribe')
mobileTemplate.setAssistRange(1)
	mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
mobileTemplate.setStalker(True)
templates = Vector()
templates.add('object/mobile/shared_jinda_male.iff')
templates.add('object/mobile/shared_jinda_female.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('meleeHit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 65
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
lootPoolNames_2 = ['random_loot_primitives']
lootPoolChances_2 = [100]
lootGroupChance_2 = 35
mobileTemplate.addToLootGroups(lootPoolNames_2,lootPoolChances_2,lootGroupChance_2)
core.spawnService.addMobileTemplate('frenzied_donkuwah', mobileTemplate)
return
|
jolbyandfriends/python-bandsintown
|
bandsintown/client.py
|
Python
|
mit
| 4,358
| 0.000688
|
try:
from urllib.parse import quote, urljoin
except ImportError:
from urllib import quote
from urlparse import urljoin
import requests
class BandsintownError(Exception):
def __init__(self, message, response=None):
self.message = message
self.response = response
def __str__(self):
return self.message
class BandsintownInvalidAppIdError(BandsintownError):
pass
class BandsintownInvalidDateFormatError(BandsintownError):
pass
class Client(object):
api_base_url = 'https://rest.bandsintown.com'
def __init__(self, app_id):
"""
Args:
app_id: Required app id, can be any string
"""
self.app_id = app_id
self.default_params = {'app_id': self.app_id}
def request(self, path, params={}):
"""
Executes a request to the Bandsintown API and returns the response
object from `requests`
Args:
path: The API path to append to the base API URL for the request
params: Optional dict to tack on query string parameters to request
Returns:
Response object from `requests`
"""
url = urljoin(self.api_base_url, path)
request_params = self.default_params.copy()
request_params.update(params)
response = requests.get(
url,
headers={'Accept': 'application/json'},
params=request_params
)
data = response.json()
if 'message' in data and data['message'] == 'Missing required request parameters: [app_id]':
message = 'Missing required API key, which must be a single string argument to Client instantiation, e.g.: client = Client("my-app-id")'
raise BandsintownInvalidAppIdError(message, response)
else:
return data
def artists(self, artistname):
"""
Searches for a single artist using this endpoint:
https://app.swaggerhub.com/apis/Bandsintown/PublicAPI/3.0.0#/single_artist_information/artist
Args:
artistname: Artist name to search for
Returns:
A dict of artist data when the artist is found, and returns
None when not found
Usage:
client = Client(app_id='my-app-id')
client.artists('Bad Religion')
"""
try:
return self.request('artists/%s' % quote(artistname))
except ValueError:
# Currently the API's response when the artist doesn't exist is
# badly formed JSON. In such a case, we're catching the exception
# and returning None
return None
def artists_events(self, artistname, date=None):
"""
Searches for events for a single artist, with an optional date range,
using this endpoint:
https://app.swaggerhub.com/apis/Bandsintown/PublicAPI/3.0.0#/upcoming_artist_events/artistEvents
Args:
artistname: Artist name to search for
date: Optional date string filter, can be a specific date in the
format: "yyyy-mm-dd", a range "yyyy-mm-dd,yyyy-mm-dd", or can be a
few keyword values like "upcoming" or "all"
Returns:
A list of event data, which could be empty, None if artist not
found, raises `BandsintownInvalidDateFormatError` for bad `date`
param, or raises `BandsintownError` for other unknown error
Usage:
client = Client(app_id=1234)
client.artists_events('Bad Religion')
client.artists_events('Bad Religion', date='2018-02-01,2018-02-28')
"""
params = {}
if date:
params['date'] = date
data = self.request('artists/%s/events' % quote(artistname), params)
if 'errors' in data:
if data['errors'][0] == 'Invalid date format':
raise BandsintownInvalidDateFormatError(
'Invalid date parameter: "%s", must be in the format: "yyyy-mm-dd", or "yyyy-mm-dd,yyyy-mm-dd" for a range, or keywords "upcoming" or "all"' % date
)
elif data['errors'][0] == 'Unknown Artist':
return None
else:
raise BandsintownError('Unknown error with request', data)
return data
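# Usage sketch (hypothetical app id; shows the error handling defined above):
#     client = Client(app_id='my-app-id')
#     try:
#         events = client.artists_events('Bad Religion', date='bogus')
#     except BandsintownInvalidDateFormatError as e:
#         print(e.message)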
|
anoopvalluthadam/bztools
|
auto_nag/scripts/email_nag.py
|
Python
|
bsd-3-clause
| 24,245
| 0.003465
|
#!/usr/bin/env python
"""
A script for automated nagging emails based on passed in queries
These can be collated into several 'queries' through the use of multiple query files with
a 'query_name' param set eg: 'Bugs tracked for Firefox Beta (13)'
Once the bugs have been collected from Bugzilla they are sorted into buckets cc: assignee manager
and to the assignee(s) or need-info? for each query
"""
import sys
import os
import smtplib
import subprocess
import tempfile
import collections
from datetime import datetime
from argparse import ArgumentParser
from auto_nag.bugzilla.agents import BMOAgent
import phonebook
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates'))
REPLY_TO_EMAIL = 'release-mgmt@mozilla.com'
DEFAULT_CC = ['release-mgmt@mozilla.com']
EMAIL_SUBJECT = ''
SMTP = 'smtp.mozilla.org'
# TODO - Sort by who a bug is blocked on (thanks @dturner)
# TODO - write tests!
# TODO - look into knocking out duplicated bugs in queries -- perhaps print out if there are dupes in queries when queries > 1
# TODO - should compare bugmail from API results to phonebook bugmail in to_lower()
def get_last_manager_comment(comments, manager, person):
# go through in reverse order to get most recent
for comment in comments[::-1]:
if person is not None:
if comment.creator.name == manager['mozillaMail'] or comment.creator.name == manager['bugzillaEmail']:
return comment.creation_time.replace(tzinfo=None)
return None
def get_last_assignee_comment(comments, person):
# go through in reverse order to get most recent
for comment in comments[::-1]:
if person is not None:
if comment.creator.name == person['mozillaMail'] or comment.creator.name == person['bugzillaEmail']:
return comment.creation_time.replace(tzinfo=None)
return None
def query_url_to_dict(url):
    if ';' in url:
fields_and_values = url.split("?")[1].split(";")
else:
fields_and_values = url.split("?")[1].split("&")
d = collections.defaultdict(list)
for pair in fields_and_values:
(key, val) = pair.split("=")
if key != "list_id":
d[key].append(val)
return d
def generateEmailOutput(subject, queries, template, people, show_comment=False,
manager_email=None, rollup=False, rollupEmail=None):
cclist = []
toaddrs = []
template_params = {}
# stripping off the templates dir, just in case it gets passed in the args
template = env.get_template(template.replace('templates/', '', 1))
def addToAddrs(bug):
if bug.assigned_to.name in people.people_by_bzmail:
person = dict(people.people_by_bzmail[bug.assigned_to.name])
if person['mozillaMail'] not in toaddrs:
toaddrs.append(person['mozillaMail'])
for query in queries.keys():
# Avoid dupes in the cclist from several queries
query_cc = queries[query].get('cclist', [])
for qcc in query_cc:
if qcc not in cclist:
cclist.append(qcc)
if query not in template_params:
template_params[query] = {'buglist': []}
if len(queries[query]['bugs']) != 0:
for bug in queries[query]['bugs']:
if 'show_summary' in queries[query]:
if queries[query]['show_summary'] == '1':
summary = bug.summary
else:
summary = ""
else:
summary = ""
template_params[query]['buglist'].append(
{
'id': bug.id,
'summary': summary,
# 'comment': bug.comments[-1].creation_time.replace(tzinfo=None),
'assignee': bug.assigned_to.real_name,
'flags': bug.flags
}
)
# more hacking for JS special casing
if bug.assigned_to.name == 'general@js.bugs' and 'nihsanullah@mozilla.com' not in toaddrs:
toaddrs.append('nihsanullah@mozilla.com')
# if needinfo? in flags, add the flag.requestee to the toaddrs instead of bug assignee
if bug.flags:
for flag in bug.flags:
if 'needinfo' in flag.name and flag.status == '?':
try:
person = dict(people.people_by_bzmail[str(flag.requestee)])
if person['mozillaMail'] not in toaddrs:
toaddrs.append(person['mozillaMail'])
except:
if str(flag.requestee) not in toaddrs:
toaddrs.append(str(flag.requestee))
else:
addToAddrs(bug)
else:
addToAddrs(bug)
    message_body = template.render(queries=template_params, show_comment=show_comment)
if manager_email is not None and manager_email not in cclist:
cclist.append(manager_email)
    # no need to also cc the manager if there is more than one recipient
if len(toaddrs) > 1:
        for email in list(toaddrs):  # iterate over a copy; removing from the live list skips items
if email in cclist:
toaddrs.remove(email)
if cclist == ['']:
cclist = None
if rollup:
joined_to = ",".join(rollupEmail)
else:
joined_to = ",".join(toaddrs)
message = (
"From: %s\r\n" % REPLY_TO_EMAIL
+ "To: %s\r\n" % joined_to
+ "CC: %s\r\n" % ",".join(cclist)
+ "Subject: %s\r\n" % subject
+ "\r\n"
+ message_body)
toaddrs = toaddrs + cclist
return toaddrs, message
def sendMail(toaddrs, msg, username, password, dryrun=False):
if dryrun:
print "\n****************************\n* DRYRUN: not sending mail *\n****************************\n"
print msg
else:
server = smtplib.SMTP_SSL(SMTP, 465)
server.set_debuglevel(1)
server.login(username, password)
# note: toaddrs is required for transport agents, the msg['To'] header is not modified
server.sendmail(username, toaddrs, msg)
server.quit()
if __name__ == '__main__':
parser = ArgumentParser(__doc__)
parser.set_defaults(
dryrun=False,
username=None,
password=None,
roll_up=False,
show_comment=False,
email_cc_list=None,
queries=[],
days_since_comment=-1,
verbose=False,
keywords=None,
email_subject=None,
no_verification=False,
)
parser.add_argument("-d", "--dryrun", dest="dryrun", action="store_true",
help="just do the query, and print emails to console without emailing anyone")
parser.add_argument("-m", "--mozilla-email", dest="mozilla_mail",
help="specify a specific address for sending email"),
parser.add_argument("-p", "--email-password", dest="email_password",
help="specify a specific password for sending email")
parser.add_argument("-b", "--bz-api-key", dest="bz_api_key",
help="Bugzilla API key")
parser.add_argument("-t", "--template", dest="template",
required=True,
help="template to use for the buglist output")
parser.add_argument("-e", "--email-cc-list", dest="email_cc_list",
action="append",
help="email addresses to include in cc when sending mail")
parser.add_argument("-q", "--query", dest="queries",
action="append",
required=True,
help="a file containing a dictionary of a bugzilla query")
parser.add_argument("-k", "--keyword", dest="keywords",
action="append",
help="keywords to collate buglists")
parser.add_argument("-s", "--subject", dest="email_subject",
req
|
stfc/MagDB
|
client/magdb-discover.py
|
Python
|
apache-2.0
| 5,206
| 0.00365
|
#!/usr/bin/env python2
from os import listdir
from os.path import isfile, isdir, join, dirname
from re import sub
from urllib import urlopen
from subprocess import Popen, PIPE
from configparser import ConfigParser
import fcntl, socket, struct
def getHwAddr(ifname):
"""The pure python solution for this problem under Linux to get the MAC for a specific local interface,
originally posted as a comment by vishnubob and improved by on Ben Mackey in http://code.activestate.com/recipes/439094-get-the-ip-address-associated-with-a-network-inter/"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
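# Usage sketch (Linux only; the interface name is a placeholder):
#     getHwAddr('eth0')   # -> e.g. '52:54:00:12:34:56'
# 0x8927 is the SIOCGIFHWADDR ioctl, which returns the interface's hardware
# (MAC) address.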
def bonding(config):
try:
out, err = Popen([config['binaries']['quattor-query'], "/hardware/name"], stdout=PIPE).communicate()
system_id = out.splitlines()[-1].split("'")[1].replace('system','')
except:
system_id = False
mac_addresses = {}
if system_id:
record = {
"systemId" : system_id,
"bonds" : {}
}
if isdir(config['paths']['bonding']):
bonds = listdir(config['paths']['bonding'])
if bonds:
for bond in bonds:
bond_file = join(config['paths']['bonding'],bond)
if isfile(bond_file) and 'bond' in bond:
# Read bond information and tokenise
fh = open(bond_file)
data = fh.read()
fh.close()
data = data.splitlines()
data = [ l.split(': ', 1) for l in data ]
# Initialise structure
sections = [{}]
for line in data:
                        if len(line) == 2:
key, value = line
# Munge the keys slightly
key = sub(r'\(.+\)', '', key)
key = key.title().replace(' ', '')
sections[-1][key] = value
else:
sections.append({})
record["bonds"][bond] = sections
# Store the mac addresses behind bonded links for later use
for section in sections:
if 'PermanentHwAddr' in section:
mac_addresses[section['SlaveInterface']] = section['PermanentHwAddr']
print "Submitting bonding data to MagDB."
record = str(record).replace("'", '"')
try:
f = urlopen(config['urls']['bonding'], "system="+system_id+"&record="+record)
print "MagDB says: " + f.read()
except IOError:
print "Unable to submit results to MagDB"
else:
print "No network bonds found."
else:
print "No bonding information on system."
else:
print "Unable to determine systemId, will not look for network bonds."
return mac_addresses
def lldp(config, mac_addresses):
try:
out, err = Popen([config['binaries']['lldpctl'], "-f", "keyvalue"], stdout=PIPE).communicate()
except:
out = False
if out:
out = out.split('\n')[:-1]
data = []
for line in out:
if 'via=LLDP' in line:
data.append({})
if 'unknown-tlvs' in line:
continue
key, value = line.split('=')
key = key.split('.')[1:]
leaf = data[-1]
for k in key[:-1]:
if k not in leaf:
leaf[k] = {}
leaf = leaf[k]
leaf[key[-1]] = value.replace("'", "`")
# Initialise structure
record = []
for d in data:
link = {}
rid = 0
for k, v in d.iteritems():
rid = int(v['rid'])
# If the port is a member of a bonded link, the apparent mac address may have changed therefore we should use the mac address behind the bond
if k in mac_addresses:
mac = mac_addresses[k]
else:
mac = getHwAddr(k)
link[mac] = v
link[mac]['name'] = k
if rid <= 1:
record.append(link)
print "Submitting LLDP data to MagDB."
record = str(record).replace("'", '"')
try:
f = urlopen(config['urls']['lldp'], "record="+record)
print "MagDB says: " + f.read()
except IOError:
print "Unable to submit results to MagDB"
else:
print "No LLDP data found."
print "Complete."
def main():
config = ConfigParser()
config.read(['/etc/magdb-discover.conf', join(dirname(__file__), 'magdb-discover.conf')])
mac_addresses = bonding(config)
lldp(config, mac_addresses)
if __name__ == "__main__":
main()
|
dssg/tweedr
|
tweedr/emr/gnip_wc.py
|
Python
|
mit
| 1,253
| 0.00399
|
from mrjob.job import MRJob
from mrjob.protocol import JSONValueProtocol
import json
class WordCount(MRJob):
'''
    The default MRJob.INPUT_PROTOCOL is `RawValueProtocol`, but we are reading tweets,
    so we'll add a parser before we even get to the mapper.
'''
# incoming line needs to be parsed (I think), so we set a protocol to do so
INPUT_PROTOCOL = JSONValueProtocol
def mapper(self, key, line):
'''The key to the first mapper in the step-pipeline is always None.'''
# GNIP-style streams sometimes have metadata lines, but we can just ignore them
if 'info' in line and line['info']['message'] == 'Replay Request Completed':
return
# GNIP-style tweets have the tweet text in {'body': '...'} instead of the standard {'text': '...'}
if 'body' not in line:
raise Exception('Missing body field in tweet:\n ' + json.dumps(line))
text = line['body']
yield '~~~TOTAL~~~', 1
for token in text.split():
yield token.lower(), 1
def combiner(self, key, value_iter):
yield key, sum(value_iter)
def reducer(self, key, value_iter):
yield key, sum(value_iter)
if __name__ == '__main__':
WordCount.run()
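# Usage sketch (standard mrjob invocation; file names are hypothetical):
#     python gnip_wc.py tweets.json             # run locally
#     python gnip_wc.py -r emr tweets.json      # run on Elastic MapReduce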
|
WiproOpenSourcePractice/bdreappstore
|
enu/real_time_event_detection/hadoopstream/reducer_post.py
|
Python
|
apache-2.0
| 2,141
| 0.015413
|
#!/usr/bin/env python
import sys
import os
os.environ['MPLCONFIGDIR'] = "/tmp/"
import pandas as pd
import numpy as np
import commands
import csv
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold
from sklearn import preprocessing
from sklearn.externals import joblib
current_key = None
key = None
dayList = []
def qt_rmvd( string ):
string = string.strip()
if string.startswith("'") and string.endswith("'"):
string = string[1:-1]
return string
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# parse the input we got from mapper.py
key, values = line.split('\t', 1)
values = values[1:-1]
values = values.split(",")
day = qt_rmvd(values[0])
values = values[1:]
#print line, key, day ,values
#print key
#continue
if current_key is None:
current_key = key
if current_key == key:
dayList.append([day,qt_rmvd(values[0])])
else:
dayList.sort(key= lambda x: int(x[0].split("_")[-1]))
fname = "Event_DailyRecord_"+current_key.strip()+".csv"
f = open(fname,"wt")
w = csv.writer(f)
w.writerow(("Day","Event"))
for elem in dayList:
w.writerow((elem[0],elem[1]))
f.close()
#print commands.getoutput("ls")
#print commands.getoutput("hadoop fs -rm /user/dropuser/schlumberger-result/"+fname)
print commands.getoutput("hadoop fs -put "+fname+" /user/dropuser/schlumberger-result/")
dayList = []
current_key = key
dayList.append([day,qt_rmvd(values[0])])
if len(dayList) > 0:
dayList.sort(key= lambda x: int(x[0].split("_")[-1]))
fname = "Event_DailyRecord_"+current_key.strip()+".csv"
f = open(fname,"wt")
w = csv.writer(f)
w.writerow(("Day","Event"))
for elem in dayList:
w.writerow((elem[0],elem[1]))
f.close()
#print commands.getoutput("ls")
#print commands.getoutput("hadoop fs -rm /user/dropuser/schlumberger-result/"+fname)
print commands.getoutput("hadoop f
|
s -put "+fname+" /user/dropuser/schlumberger-result/")
|
apllicationCOM/youtube-dl-api-server
|
youtube_dl_server/youtube_dl/extractor/porn91.py
|
Python
|
unlicense
| 2,548
| 0.000808
|
# encoding: utf-8
from __future__ import unicode_literals
from ..compat import compat_urllib_parse
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
ExtractorError,
)
class Porn91IE(InfoExtractor):
IE_NAME = '91porn'
_VALID_URL = r'(?:https?://)(?:www\.|)91porn\.com/.+?\?viewkey=(?P<id>[\w\d]+)'
_TEST = {
'url': 'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134',
'md5': '6df8f6d028bc8b14f5dbd73af742fb20',
'info_dict': {
'id': '7e42283b4f5ab36da134',
'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!',
'ext': 'mp4',
'duration': 431,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'http://91porn.com/view_video.php?viewkey=%s' % video_id
self._set_cookie('91porn.com', 'language', 'cn_CN')
        webpage = self._download_webpage(url, video_id, 'get HTML content')
if '作为游客,你每天只可观看10个视频' in webpage:
raise ExtractorError('91 Porn says: Daily limit 10 videos exceeded', expected=True)
title = self._search_regex(
r'<div id="viewvideo-title">([^<]+)</div>', webpage, 'title')
title = title.replace('\n', '')
# get real url
file_id = self._search_regex(
r'so.addVariable\(\'file\',\'(\d+)\'', webpage, 'file id')
sec_code = self._search_regex(
r'so.addVariable\(\'seccode\',\'([^\']+)\'', webpage, 'sec code')
max_vid = self._search_regex(
r'so.addVariable\(\'max_vid\',\'(\d+)\'', webpage, 'max vid')
url_params = compat_urllib_parse.urlencode({
'VID': file_id,
'mp4': '1',
'seccode': sec_code,
'max_vid': max_vid,
})
info_cn = self._download_webpage(
'http://91porn.com/getfile.php?' + url_params, video_id,
'get real video url')
video_url = self._search_regex(r'file=([^&]+)&', info_cn, 'url')
duration = parse_duration(self._search_regex(
r'时长:\s*</span>\s*(\d+:\d+)', webpage, 'duration', fatal=False))
comment_count = int_or_none(self._search_regex(
r'留言:\s*</span>\s*(\d+)', webpage, 'comment count', fatal=False))
return {
'id': video_id,
'title': title,
'url': video_url,
'duration': duration,
'comment_count': comment_count,
}
|
skilstak/code-dot-org-python
|
solutions/stage19-artist5/s1level108.py
|
Python
|
unlicense
| 449
| 0.013363
|
import sys
sys.path.append('../..')
import codestudio
z = codestudio.load('s1level108')
z.speed = 'faster'
def draw_tree(depth,branches):
if depth > 0:
z.color = z.random_color()
z.move_forward(7*depth)
z.turn_left(130)
for count in range(branches):
z.turn_right(180/branches)
draw_tree(depth-1,branches)
z.turn_left(50)
z.jump_backward(7*depth)
draw_tree(9,2)
z.wait()
|
ndparker/tdi
|
tests/tools/js_escape_string.py
|
Python
|
apache-2.0
| 1,877
| 0.007991
|
#!/usr/bin/env python
import warnings as _warnings
_warnings.resetwarnings()
_warnings.filterwarnings('error')
from tdi.tools import javascript
x = javascript.escape_string(u'\xe9--"\'\\-----]]></script>')
print type(x).__name__, x
x = javascript.escape_string(u'\xe9---"\'\\----]]></script>', inlined=False)
print type(x).__name__, x
x = javascript.escape_string('\xe9--"\'\\-----]]></script>')
print type(x).__name__, x
x = javascript.escape_string('\xe9---"\'\\----]]></script>', inlined=False)
print type(x).__name__, x
try:
x = javascript.escape_string('\xe9--"\'\\-----]]></script>',
encoding='utf-8'
)
except UnicodeError:
print "UnicodeError - OK"
try:
    x = javascript.escape_string('\xe9--"\'\\-----]]></script>',
inlined=False, encoding='utf-8'
)
except UnicodeError:
print "UnicodeError - OK"
x = javascript.escape_string('\xc3\xa9---"\'\\----]]></script>',
encoding='utf-8'
)
print type(x).__name__, x
x = javascript.escape_string('\xc3\xa9---"\'\\----]]></script>',
inlined=False, encoding='utf-8'
)
print type(x).__name__, x
# Bigunicode test: 𝔞 - MATHEMATICAL FRAKTUR SMALL A
# 1st: the real character must be replaced by surrogates.
# 2nd: The unreal one must stay.
a, s = u'a', u'\\'
for u in ('5\xd8\x1e\xdd'.decode("utf-16-le"), u'\\U0001d51e'):
for c in xrange(5):
x = javascript.escape_string(s * c + u + u'--"\'\\-----]]></script>')
print type(x).__name__, x
x = javascript.escape_string(s * c + u + u'--"\'\\-----]]></script>',
inlined=False
)
print type(x).__name__, x
x = javascript.escape_string(a + s * c + u + u'-"\'\\---]]></script>')
print type(x).__name__, x
x = javascript.escape_string(a + s * c + u + u'-"\'\\---]]></script>',
inlined = False
)
print type(x).__name__, x
|
davinellulinvega/COM1005
|
Lab8/wipe.py
|
Python
|
gpl-3.0
| 2,414
| 0.000829
|
names = list()
times = list()
keys = list()
names.append("HeadPitch")
times.append([0.96, 1.68, 3.28, 3.96, 4.52, 5.08])
keys.append([-0.0261199, 0.427944, 0.308291, 0.11194, -0.013848, 0.061318])
names.append("HeadYaw")
times.append([0.96, 1.68, 3.28, 3.96, 4.52, 5.08])
keys.append([-0.234743, -0.622845, -0.113558, -0.00617796, -0.027654, -0.036858])
names.append("LElbowRoll")
times.append([0.8, 1.52, 3.12, 3.8, 4.36, 4.92])
keys.append([-0.866668, -0.868202, -0.822183, -0.992455, -0.966378, -0.990923])
names.append("LElbowYaw")
times.append([0.8, 1.52, 3.12, 3.8, 4.36, 4.92])
keys.append([-0.957257, -0.823801, -1.00788, -0.925044, -1.24412, -0.960325])
names.append("LHand")
times.append([1.52, 3.12, 3.8, 4.92])
keys.append([0.132026, 0.132026, 0.132026, 0.132026])
names.append("LShoulderPitch")
times.append([0.8, 1.52, 3.12, 3.8, 4.36, 4.92])
keys.append([0.863599, 0.858999, 0.888144, 0.929562, 1.017, 0.977116])
names.append("LShoulderRoll")
times.append([0.8, 1.52, 3.12, 3.8, 4.36, 4.92])
keys.append([0.286815, 0.230059, 0.202446, 0.406468, 0.360449, 0.31903])
names.append("LWristYaw")
times.append([1.52, 3.12, 3.8, 4.92])
keys.append([0.386526, 0.386526, 0.386526, 0.386526])
names.append("RElbowRoll")
times.append([0.64, 1.36, 2.96, 3.64, 4.2, 4.76])
keys.append([1.28093, 1.39752, 1.57239, 1.24105, 1.22571, 0.840674])
names.append("RElbowYaw")
times.append([0.64, 1.36, 2.96, 3.64, 4.2, 4.76])
keys.append([-0.128898, -0.285367, -0.15651, 0.754686, 1.17193, 0.677985])
names.append("RHand")
times.append([1.36, 2.96, 3.64, 4.76])
keys.append([0.166571, 0.166208, 0.166571, 0.166208])
names.append("RShoulderPitch")
times.append([0.64, 1.36, 2.96, 3.64, 4.2, 4.76])
keys.append([0.0767419, -0.59515, -0.866668, -0.613558, 0.584497, 0.882091])
names.append("RShoulderRoll")
times.append([0.64, 1.36, 2.96, 3.64, 4.2, 4.76])
keys.append([-0.019984, -0.019984, -0.615176, -0.833004, -0.224006, -0.214801])
names.append("RWristY
|
aw")
times.append([1.36, 2.96, 3.64, 4.76])
keys.append([-0.058334, -0.0521979, -0.067538, -0.038392])
def run_animation(motion):
"""Use the motion module to run the angular interpolations and execute the animation
:param motion: the ALMotion module
:return the id of the request
"""
# Request the execution of the animation
motion_id = motion.post.angleInterpolation(names, keys, times, True)
return motion_id
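# Usage sketch (assumes a reachable NAO robot; ALProxy is NAOqi's usual
# entry point, host/port are placeholders):
#     from naoqi import ALProxy
#     motion = ALProxy("ALMotion", "nao.local", 9559)
#     run_animation(motion)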
|
brian-joseph-petersen/oply
|
interpreter/execute.py
|
Python
|
mit
| 503
| 0.035785
|
from interpreter.heap import stringify
from interpreter.interpret import DTREE, CTREE
class execute():
def __init__( self, program ):
self.len = 0
self.heap = {}
self.stack = []
H = "H" + str( self.len )
self.heap[H] = { "$": "nil" }
self.len = self.len + 1
self.stack.append( H )
        for declaration in program[0]: DTREE( self, declaration )
for command in program[1]: CTREE( self, command )
stringify( self )
|
GoodRx/pyramid-sendgrid-webhooks
|
tests/test_pyramid_sendgrid_webhooks.py
|
Python
|
mit
| 6,637
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pyramid_sendgrid_webhooks
----------------------------------
Tests for `pyramid_sendgrid_webhooks` module.
"""
from __future__ import unicode_literals
import unittest
import pyramid_sendgrid_webhooks as psw
from pyramid_sendgrid_webhooks import events, errors
class EventGrabber(object):
""" Grabs events as they're dispatched """
def __init__(self):
self.events = []
self.last = None
def __call__(self, event):
self.events.append(event)
self.last = event
def simple_app(global_config, **settings):
from pyramid.config import Configurator
config = Configurator(settings=settings)
config.include('pyramid_sendgrid_webhooks', WebhookTestBase._PREFIX)
config.registry.grabber = EventGrabber()
config.add_subscriber(config.registry.grabber, events.BaseWebhookEvent)
return config.make_wsgi_app()
class WebhookTestBase(unittest.TestCase):
_PREFIX = '/webhook'
_PATH = _PREFIX + '/receive'
def setUp(self):
from pyramid import testing
self.request = testing.DummyRequest()
self.config = testing.setUp(request=self.request)
def tearDown(self):
from pyramid import testing
testing.tearDown()
def _createGrabber(self, event_cls=events.BaseWebhookEvent):
grabber = EventGrabber()
self.config.add_subscriber(grabber, event_cls)
return grabber
def _createRequest(self, event_body):
if not isinstance(event_body, list):
event_body = [event_body]
self.request.json_body = event_body
return self.request
def _createApp(self, event_cls=events.BaseWebhookEvent):
from webtest.app import TestApp
app = TestApp(simple_app({}))
app.grabber = app.app.registry.grabber
return app
class TestBaseEvents(WebhookTestBase):
def _makeOne(self, event_type='bounce', category='category'):
return {
'asm_group_id': 1,
'category': category,
'cert_error': '0',
'email': 'email@example.com',
'event': event_type,
'ip': '127.0.0.1',
'reason': '500 No Such User',
'smtp-id': '<original-smtp-id@domain.com>',
'status': '5.0.0',
'timestamp': 1249948800,
'tls': '1',
'type': 'bounce',
'unique_arg_key': 'unique_arg_value',
}
def _create_dt(self):
import datetime
return datetime.datetime(2009, 8, 11, 0, 0)
def test_event_parsed(self):
        grabber = self._createGrabber()
request = self._createRequest(self._makeOne())
psw.receive_events(request)
self.assertEqual(len(grabber.events), 1)
def test_event_parsed_from_request(self):
app = self._createApp()
grabber = app.grabber
app.post_json(self._PATH, [self._makeOne()])
self.assertEqual(len(grabber.events), 1)
    def test_multiple_events_parsed_from_request(self, n=3):
app = self._createApp()
grabber = app.grabber
app.post_json(self._PATH, [self._makeOne()] * n)
self.assertEqual(len(grabber.events), n)
def test_specific_event_caught(self):
grabber = self._createGrabber(events.BounceEvent)
request = self._createRequest(self._makeOne())
psw.receive_events(request)
self.assertEqual(len(grabber.events), 1)
def test_unspecified_event_ignored(self):
grabber = self._createGrabber(events.DeferredEvent)
request = self._createRequest(self._makeOne())
psw.receive_events(request)
self.assertEqual(len(grabber.events), 0)
def test_timestamp_parsed(self):
grabber = self._createGrabber()
request = self._createRequest(self._makeOne())
psw.receive_events(request)
self.assertEqual(grabber.last.dt, self._create_dt())
def test_unique_arguments_extracted(self):
grabber = self._createGrabber()
request = self._createRequest(self._makeOne())
psw.receive_events(request)
self.assertDictEqual(grabber.last.unique_arguments, {
'unique_arg_key': 'unique_arg_value',
})
def test_correct_subclass(self):
grabber = self._createGrabber()
request = self._createRequest(self._makeOne())
psw.receive_events(request)
self.assertIsInstance(grabber.last, events.BounceEvent)
def test_unknown_event_raises_exception(self):
request = self._createRequest(self._makeOne(event_type='UNKNOWN'))
self.assertRaises(
errors.UnknownEventError, psw.receive_events, request)
def test_single_category_is_list_wrapped(self):
grabber = self._createGrabber()
request = self._createRequest(self._makeOne())
psw.receive_events(request)
self.assertEqual([grabber.last.category], grabber.last.categories)
def test_multiple_categories_are_unchanged(self):
grabber = self._createGrabber()
request = self._createRequest(self._makeOne(category=['c1', 'c2']))
psw.receive_events(request)
self.assertEqual(grabber.last.category, grabber.last.categories)
def test_empty_categories_is_empty_list(self):
grabber = self._createGrabber()
request = self._createRequest(self._makeOne(category=None))
psw.receive_events(request)
self.assertEqual(grabber.last.categories, [])
class TestDeliveryEvents(WebhookTestBase):
def _makeOne(self):
return {
'asm_group_id': 1,
'category': ['category1', 'category2'],
'cert_error': '0',
'email': 'email@example.com',
'event': 'bounce',
'ip': '127.0.0.1',
'reason': '500 No Such User',
'smtp-id': '<original-smtp-id@domain.com>',
'status': '5.0.0',
'timestamp': 1249948800,
'tls': '1',
'type': 'bounce',
'unique_arg_key': 'unique_arg_value',
}
class TestEngagementEvents(WebhookTestBase):
def _makeOne(self):
return {
'asm_group_id': 1,
'category': ['category1', 'category2'],
'email': 'email@example.com',
'event': 'click',
'ip': '255.255.255.255',
'timestamp': 1249948800,
'unique_arg_key': 'unique_arg_value',
'url': 'http://yourdomain.com/blog/news.html',
'useragent': 'Example Useragent',
}
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
|
unclev/vk.unclev.ru
|
extensions/forwarded_messages.py
|
Python
|
mit
| 1,156
| 0.021683
|
# coding: utf-8
# This file is a part of VK4XMPP transport
# © simpleApps, 2013 — 2015.
from datetime import datetime
if not require("attachments"):
raise AssertionError("'forwardMessages' requires 'attachments'")
BASE_SPACER = chr(32) + unichr(183) + chr(32)
def parseForwardedMessages(self, msg, depth=0):
body = ""
if msg.has_key("fwd_messages"):
spacer = BASE_SPACER * depth
body = "\n" + spacer
body += _("Forwarded messages:")
fwd_messages = sorted(msg["fwd_messages"], sortMsg)
for fwd in fwd_messages:
source = fwd["user_id"]
date = fwd["date"]
fwdBody = escape("", uhtml(compile_eol.sub("\n" + spacer + BASE_SPACER, fwd["body"])))
date = datetime.fromtimestamp(date).strftime("%d.%m.%Y %H:%M:%S")
name = self.vk.getUserData(source)["name"]
body += "\n%s[%s] <%s> %s" % (spacer + BASE_SPACER, date, name, fwdBody)
body += parseAttachments(self, fwd, spacer + (BASE_SPACER * 2))
if depth < MAXIMUM_FORWARD_DEPTH:
body += parseForwardedMessages(self, fwd, (depth + 1))
return body
if not isdef("MAXIMUM_FORWARD_DEPTH"):
MAXIMUM_FORWARD_DEPTH = 29
registerHandler("msg01", parseForwardedMessages)
|
Tayamarn/socorro
|
socorro/unittest/external/postgresql/unittestbase.py
|
Python
|
mpl-2.0
| 3,175
| 0
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from configman import ConfigurationManager, Namespace
from configman.converters import list_converter, class_converter
from socorro.external.postgresql.connection_context import ConnectionContext
from socorro.unittest.testbase import TestCase
class PostgreSQLTestCase(TestCase):
"""Base class for PostgreSQL related unit tests. """
app_name = 'PostgreSQLTestCase'
app_version = '1.0'
app_description = __doc__
metadata = ''
required_config = Namespace()
# we use this class here because it is a convenient way to pull in
# both a database connection context and a transaction executor
required_config.add_option(
'crashstorage_class',
        default='socorro.external.postgresql.crashstorage.'
'PostgreSQLCrashStorage',
from_string_converter=class_converter
)
required_config.add_option(
name='database_superusername',
default='test',
doc='Username to connect to database',
)
required_config.add_option(
name='database_superuserpassword',
default='aPassword',
doc='Password to connect to database',
)
required_config.add_option(
name='dropdb',
default=False,
doc='Whether or not to drop database_name',
exclude_from_print_conf=True,
exclude_from_dump_conf=True
)
required_config.add_option(
'platforms',
default=[{
"id": "windows",
"name": "Windows NT"
}, {
"id": "mac",
"name": "Mac OS X"
}, {
"id": "linux",
"name": "Linux"
}],
doc='Array associating OS ids to full names.',
)
required_config.add_option(
'non_release_channels',
default=['beta', 'aurora', 'nightly'],
doc='List of channels, excluding the `release` one.',
from_string_converter=list_converter
)
required_config.add_option(
'restricted_channels',
default=['beta'],
doc='List of channels to restrict based on build ids.',
from_string_converter=list_converter
)
@classmethod
def get_standard_config(cls):
config_manager = ConfigurationManager(
[cls.required_config,
],
app_name='PostgreSQLTestCase',
app_description=__doc__,
argv_source=[]
)
with config_manager.context() as config:
return config
@classmethod
def setUpClass(cls):
"""Create a configuration context and a database connection.
This will create (and later destroy) one connection per test
case (aka. test class).
"""
cls.config = cls.get_standard_config()
cls.database = ConnectionContext(cls.config)
cls.connection = cls.database.connection()
@classmethod
def tearDownClass(cls):
"""Close the database connection. """
cls.connection.close()
|
urbn/kombu
|
t/unit/transport/test_librabbitmq.py
|
Python
|
bsd-3-clause
| 5,055
| 0
|
from __future__ import absolute_import, unicode_literals
import pytest
from case import Mock, patch, skip
try:
import librabbitmq
except ImportError:
librabbitmq = None # noqa
else:
from kombu.transport import librabbitmq # noqa
@skip.unless_module('librabbitmq')
class lrmqCase:
pass
class test_Message(lrmqCase):
def test_init(self):
chan = Mock(name='channel')
message = librabbitmq.Message(
chan, {'prop': 42}, {'delivery_tag': 337}, 'body',
)
assert message.body == 'body'
assert message.delivery_tag == 337
assert message.properties['prop'] == 42
class test_Channel(lrmqCase):
def test_prepare_message(self):
conn = Mock(name='connection')
chan = librabbitmq.Channel(conn, 1)
assert chan
body = 'the quick brown fox...'
properties = {'name': 'Elaine M.'}
body2, props2 = chan.prepare_message(
body, properties=properties,
priority=999,
content_type='ctype',
content_encoding='cenc',
headers={'H': 2},
)
assert props2['name'] == 'Elaine M.'
assert props2['priority'] == 999
assert props2['content_type'] == 'ctype'
assert props2['content_encoding'] == 'cenc'
assert props2['headers'] == {'H': 2}
assert body2 == body
body3, props3 = chan.prepare_message(body, priority=777)
assert props3['priority'] == 777
assert body3 == body
class test_Transport(lrmqCase):
def setup(self):
self.client = Mock(name='client')
self.T = librabbitmq.Transport(self.client)
def test_driver_version(self):
assert self.T.driver_version()
def test_create_channel(self):
conn = Mock(name='connection')
chan = self.T.create_channel(conn)
assert chan
conn.channel.assert_called_with()
def test_drain_events(self):
conn = Mock(name='connection')
self.T.drain_events(conn, timeout=1.33)
conn.drain_events.assert_called_with(timeout=1.33)
def test_establish_connection_SSL_not_supported(self):
self.client.ssl = True
with pytest.raises(NotImplementedError):
self.T.establish_connection()
def test_establish_connection(self):
self.T.Connection = Mock(name='Connection')
self.T.client.ssl = False
self.T.client.port = None
self.T.client.transport_options = {}
conn = self.T.establish_connection()
assert self.T.client.port == self.T.default_connection_params['port']
assert conn.client == self.T.client
assert self.T.client.drain_events == conn.drain_events
def test_collect__no_conn(self):
self.T.client.drain_events = 1234
self.T._collect(None)
assert self.client.drain_events is None
assert self.T.client is None
def test_collect__with_conn(self):
self.T.client.drain_events = 1234
conn = Mock(name='connection')
chans = conn.channels = {1: Mock(name='chan1'), 2: Mock(name='chan2')}
conn.callbacks = {'foo': Mock(name='cb1'), 'bar': Mock(name='cb2')}
for i, chan in enumerate(conn.channels.values()):
chan.connection = i
with patch('os.close') as close:
self.T._collect(conn)
close.assert_called_with(conn.fileno())
assert not conn.channels
assert not conn.callbacks
for chan in chans.values():
assert chan.connection is None
assert self.client.drain_events is None
assert self.T.client is None
with patch('os.close') as close:
self.T.client = self.client
close.side_effect = OSError()
self.T._collect(conn)
close.assert_called_with(conn.fileno())
def test_collect__with_fileno_raising_value_error(self):
conn = Mock(name='connection')
conn.channels = {1: Mock(name='chan1'), 2: Mock(name='chan2')}
with patch('os.close') as close:
self.T.client = self.client
conn.fileno.side_effect = ValueError("Socket not connected")
self.T._collect(conn)
close.assert_not_called()
conn.fileno.assert_called_with()
assert self.client.drain_events is None
assert self.T.client is None
def test_register_with_event_loop(self):
conn = Mock(name='conn')
loop = Mock(name='loop')
self.T.register_with_event_loop(conn, loop)
loop.add_reader.assert_called_with(
            conn.fileno(), self.T.on_readable, conn, loop,
)
def test_verify_connection(self):
        conn = Mock(name='connection')
conn.connected = True
assert self.T.verify_connection(conn)
def test_close_connection(self):
conn = Mock(name='connection')
self.client.drain_events = 1234
self.T.close_connection(conn)
assert self.client.drain_events is None
conn.close.assert_called_with()
|
DonaldTrumpHasTinyHands/tiny_hands_pac
|
tiny_hands_pac/settings/base.py
|
Python
|
mit
| 6,665
| 0.0006
|
"""
Django settings for tiny_hands_pac project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
from os.path import abspath, dirname, join, normpath
from sys import path
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
""" Get the environment variable or return exception """
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
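# Usage sketch (the variable name below is hypothetical): returns the value
# when the variable is set, and raises ImproperlyConfigured otherwise.
#   SECRET_KEY = get_env_variable('DJANGO_SECRET_KEY')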
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
PROJECT_ROOT = dirname(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# Do not set SECRET_KEY or LDAP password or any other sensitive data here.
# Instead, create a local.py file on the server.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'compressor',
'taggit',
'modelcluster',
'wagtail.contrib.wagtailsitemaps',
'wagtail.contrib.wagtailsearchpromotions',
'wagtail.wagtailforms',
'wagtail.wagtailredirects',
'wagtail.wagtailembeds',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailimages',
'wagtail.wagtailsearch',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'wagtail.contrib.settings',
'wagtailfontawesome',
'utils',
'pages',
'blog',
'events',
'contact',
'people',
'photo_gallery',
'products',
'documents_gallery',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
ROOT_URLCONF = 'tiny_hands_pac.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug' : DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'pages.context_processors.site_url',
],
},
},
]
WSGI_APPLICATION = 'tiny_hands_pac.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tiny_hands_pac',
'USER': '',
'HOST': '', # Set to empty string for localhost.
'PORT': '', # Set to empty string for default.
'CONN_MAX_AGE': 600,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
MEDIA_URL = '/files/'
# Django compressor settings
# http://django-compressor.readthedocs.org/en/latest/settings/
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
COMPRESS_OFFLINE = True
# Feeds app for Wagtail CMS
FEED_APP_LABEL = 'blog'
FEED_MODEL_NAME = 'BlogPage'
FEED_ITEM_DESCRIPTION_FIELD = 'intro'
FEED_ITEM_CONTENT_FIELD = 'body'
FEED_TITLE = 'Tiny Hands Big News'
FEED_LINK = '/news/'
FEED_DESCRIPTION = ""
FEED_AUTHOR_EMAIL = 'donaldtrumphastinyhands@gmail.com'
FEED_AUTHOR_LINK = 'https://www.donaldtrumphastinyhands.com'
# Settings for wagalytics
GA_KEY_FILEPATH = ''
GA_VIEW_ID = ''
# Google Maps Key
GOOGLE_MAPS_KEY = ''
DYNAMIC_MAP_URL = ''
STATIC_MAP_URL = ''
# Facebook Open Tags
FB_SITE_NAME = ''
FB_URL = ''
FB_DESCRIPTION = ''
FB_APP_ID = ''
# Twitter Cards
TWITTER_URL = ''
TWITTER_CREATOR = ''
TWITTER_DESCRIPTION = ''
# Use Redis as the cache backend for extra performance
# CACHES = {
# 'default': {
# 'BACKEND': 'redis_cache.cache.RedisCache',
# 'LOCATION': '127.0.0.1:6379',
# 'KEY_PREFIX': 'tiny_hands_pac',
# 'OPTIONS': {
# 'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
# }
# }
# }
# Wagtail settings
LOGIN_URL = 'wagtailadmin_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
WAGTAIL_SITE_NAME = "Tiny Hands PAC"
WAGTAILSEARCH_RESULTS_TEMPLATE = 'utils/tags/search/search_results.html'
# Use Elasticsearch as the search backend for extra performance and better search results
# WAGTAILSEARCH_BACKENDS = {
# 'default': {
# 'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',
# 'INDEX': 'tiny_hands_pac',
# },
# }
# Celery settings
# When you have multiple sites using the same Redis server,
# specify a different Redis DB. e.g. redis://localhost/5
BROKER_URL = 'redis://'
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYD_LOG_COLOR = False
|
rooi/CouchPotatoServer
|
couchpotato/core/notifications/pushbullet/main.py
|
Python
|
gpl-3.0
| 2,483
| 0.012485
|
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import base64
import json
log = CPLog(__name__)
class Pushbullet(Notification):
url = 'https://api.pushbullet.com/api/%s'
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
devices = self.getDevices()
if devices is None:
return False
# Get all the device IDs linked to this user
if not len(devices):
response = self.request('devices')
if not response:
return False
devices += [device.get('id') for device in response['devices']]
successful = 0
for device in devices:
response = self.request(
'pushes',
cache = False,
device_id = device,
type = 'note',
title = self.default_title,
body = toUnicode(message)
)
if response:
successful += 1
else:
log.error('Unable to push notification to Pushbullet device with ID %s' % device)
return successful == len(devices)
def getDevices(self):
devices = [d.strip() for d in self.conf('devices').split(',')]
# Remove empty items
devices = [d for d in devices if len(d)]
# Break on any ids that aren't integers
valid_devices = []
for device_id in devices:
d = tryInt(device_id, None)
if not d:
log.error('Device ID "%s" is not valid', device_id)
return None
valid_devices.append(d)
return valid_devices
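    # Example (sketch): a config value of "12, 34" yields [12, 34]; a
    # non-integer entry such as "12, abc" logs an error and returns None,
    # which makes notify() bail out early.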
def request(self, method, cache = True, **kwargs):
try:
base64string = base64.encodestring('%s:' % self.conf('api_key'))[:-1]
headers = {
"Authorization": "Basic %s" % base64string
}
if cache:
return self.getJsonData(self.url % method, headers = headers, data = kwargs)
else:
data = self.urlopen(self.url % method, headers = headers, data = kwargs)
return json.loads(data)
except Exception, ex:
log.error('Pushbullet request failed')
log.debug(ex)
return None
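    # Example (sketch): an api_key of "abc123" produces the header
    # "Authorization: Basic YWJjMTIzOg==" (base64 of "abc123:").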
|
aerostitch/nagios_checks
|
hdfs_disk_usage_per_datanode.py
|
Python
|
gpl-2.0
| 3,375
| 0.002963
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Joseph Herlant <herlantj@gmail.com>
# File name: hdfs_disk_usage_per_datanode.py
# Creation date: 2014-10-08
#
# Distributed under terms of the GNU GPLv3 license.
"""
This nagios active check parses the Hadoop HDFS web interface url:
http://<namenode>:<port>/dfsnodelist.jsp?whatNodes=LIVE
to check for active datanodes that use disk beyond the given thresholds.
The output includes performance datas and is truncated if longer than 1024
chars.
Tested on: Hadoop CDH3U5
"""
__author__ = 'Joseph Herlant'
__copyright__ = 'Copyright 2014, Joseph Herlant'
__credits__ = ['Joseph Herlant']
__license__ = 'GNU GPLv3'
__version__ = '1.0.2'
__maintainer__ = 'Joseph Herlant'
__email__ = 'herlantj@gmail.com'
__status__ = 'Production'
__website__ = 'https://github.com/aerostitch/'
from mechanize import Browser
from BeautifulSoup import BeautifulSoup
import argparse, sys
if __name__ == '__main__':
    # use -h argument to get help
parser = argparse.ArgumentParser(
description='A Nagios check to verify all datanodes disk usage in \
an HDFS cluster from the namenode web interface.')
parser.add_argument('-n', '--namenode', required=True,
help='hostname of the namenode of the cluster')
parser.add_argument('-p', '--port', type=int, default=50070,
help='port of the namenode http interface. \
Defaults to 50070.')
parser.add_argument('-w', '--warning', type=int, default=80,
help='warning threshold. Defaults to 80.')
parser.add_argument('-c', '--critical', type=int, default=90,
help='critical threshold. Defaults to 90.')
args = parser.parse_args()
# Get the web page from the namenode
url = "http://%s:%d/dfsnodelist.jsp?whatNodes=LIVE" % \
(args.namenode, args.port)
try:
page = Browser().open(url)
except IOError:
print 'CRITICAL: Cannot access namenode interface on %s:%d!' % \
(args.namenode, args.port)
sys.exit(2)
# parse the page
html = page.read()
soup = BeautifulSoup(html)
datanodes = soup.findAll('td', {'class' : 'name'})
pcused = soup.findAll('td', {'class' : 'pcused', 'align' : 'right'})
w_msg = ''
c_msg = ''
perfdata = ''
for (idx, node) in enumerate(datanodes):
pct = float(pcused[idx].contents[0].strip())
node = datanodes[idx].findChildren('a')[0].contents[0].strip()
if pct >= args.critical:
c_msg += ' %s=%.1f%%,' % (node, pct)
perfdata += ' %s=%.1f,' % (node, pct)
elif pct >= args.warning:
w_msg += ' %s=%.1f%%,' % (node, pct)
perfdata += ' %s=%.1f,' % (node, pct)
else:
perfdata += ' %s=%.1f,' % (node, pct)
# Prints the values and exits with the nagios exit code
if len(c_msg) > 0:
print ('CRITICAL:%s%s |%s' % (c_msg, w_msg, perfdata)).strip(',')[:1024]
sys.exit(2)
elif len(w_msg) > 0:
print ('WARNING:%s |%s' % (w_msg, perfdata)).strip(',')[:1024]
sys.exit(1)
elif len(perfdata) == 0:
print 'CRITICAL: Unable to find any node data in the page.'
sys.exit(2)
else:
print ('OK |%s' % (perfdata)).strip(',')[:1024]
sys.exit(0)
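# Example output (sketch): with the default thresholds (-w 80 -c 90), a
# datanode at 92.5% yields "CRITICAL: node1=92.5% | node1=92.5" and exit
# code 2, which Nagios treats as a critical alert with perfdata attached.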
|
jucimarjr/IPC_2017-1
|
lista04/lista04_lista02_questao21.py
|
Python
|
apache-2.0
| 2,408
| 0.014632
|
#----------------------------------------------------------------------------------------------------------------------#
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
#
# Adham Lucas da Silva Oliveira 1715310059
# Alexandre Marques Uchôa 1715310028
# André Luís Laborda Neves 1515070006
# Carlos Eduardo Tapudima de Oliveira 1715310030
# Diego Reis Figueira 1515070169
#
# Read a floating-point value with two decimal places. This value represents an amount of money. Then compute the
# smallest possible number of notes and coins into which the value can be decomposed. The notes considered are 100, 50,
# 20, 10, 5 and 2. The possible coins are 1, 0.50, 0.25, 0.10, 0.05 and 0.01. Finally, print the list of notes needed.
#----------------------------------------------------------------------------------------------------------------------#
money = float(input())
if 0 <= money <= 1000000.00:
one_hundred_reals = money//100
money = money - (one_hundred_reals * 100)
fifty_reals = money//50
money = money - (fifty_reals * 50)
    twenty_reals = money//20
    money = money - (twenty_reals * 20)
ten_reals = money//10
money = money - (ten_reals * 10)
five_reals = money//5
money = money - (five_reals *5)
two_reals = money//2
money = money - (two_reals * 2)
one_real = money//1
money = money - (one_real * 1)
fifty_cents = money//0.50
money = money - (fifty_cents * 0.50)
    twenty_five_cents = money//0.25
    money = money - (twenty_five_cents * 0.25)
ten_cents = money//0.10
money = money - (ten_cents * 0.10)
five_cents = money//0.05
money = money - (five_cents * 0.05)
one_cent = money//0.01
money = money - (one_cent * 0.01)
print('NOTAS:')
print('%.0f nota(s) de R$ 100.00' % one_hundred_reals)
print('%.0f nota(s) de R$ 50.00' % fifty_reals)
    print('%.0f nota(s) de R$ 20.00' % twenty_reals)
print('%.0f nota(s) de R$ 10.00' % ten_reals)
print('%.0f nota(s) de R$ 5.00' % five_reals)
print('%.0f nota(s) de R$ 2.00' % two_reals)
print('MOEDAS:')
print('%.0f moeda(s) de R$ 1.00' % one_real)
print('%.0f moeda(s) de R$ 0.50' % fifty_cents)
print('%.0f moeda(s) de R$ 0.25' % twenty_five_cents)
print('%.0f moeda(s) de R$ 0.10' % ten_cents)
print('%.0f moeda(s) de R$ 0.05' % five_cents)
print('%.0f moeda(s) de R$ 0.01' % one_cent)
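# Worked example (sketch, ignoring float rounding error): an input of 576.73
# decomposes as 5x100, 1x50, 1x20, 0x10, 1x5, 0x2 notes and 1x1.00, 1x0.50,
# 0x0.25, 2x0.10, 0x0.05, 3x0.01 coins.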
|
biddisco/VTK
|
ThirdParty/Twisted/twisted/names/dns.py
|
Python
|
bsd-3-clause
| 58,390
| 0.004436
|
# -*- test-case-name: twisted.names.test.test_dns -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DNS protocol implementation.
Future Plans:
- Get rid of some toplevels, maybe.
"""
from __future__ import division, absolute_import
__all__ = [
'IEncodable', 'IRecord',
'A', 'A6', 'AAAA', 'AFSDB', 'CNAME', 'DNAME', 'HINFO',
'MAILA', 'MAILB', 'MB', 'MD', 'MF', 'MG', 'MINFO', 'MR', 'MX',
'NAPTR', 'NS', 'NULL', 'PTR', 'RP', 'SOA', 'SPF', 'SRV', 'TXT', 'WKS',
'ANY', 'CH', 'CS', 'HS', 'IN',
'ALL_RECORDS', 'AXFR', 'IXFR',
'EFORMAT', 'ENAME', 'ENOTIMP', 'EREFUSED', 'ESERVER',
'Record_A', 'Record_A6', 'Record_AAAA', 'Record_AFSDB', 'Record_CNAME',
'Record_DNAME', 'Record_HINFO', 'Record_MB', 'Record_MD', 'Record_MF',
'Record_MG', 'Record_MINFO', 'Record_MR', 'Record_MX', 'Record_NAPTR',
'Record_NS', 'Record_NULL', 'Record_PTR', 'Record_RP', 'Record_SOA',
'Record_SPF', 'Record_SRV', 'Record_TXT', 'Record_WKS', 'UnknownRecord',
'QUERY_CLASSES', 'QUERY_TYPES', 'REV_CLASSES', 'REV_TYPES', 'EXT_QUERIES',
'Charstr', 'Message', 'Name', 'Query', 'RRHeader', 'SimpleRecord',
'DNSDatagramProtocol', 'DNSMixin', 'DNSProtocol',
'OK', 'OP_INVERSE', 'OP_NOTIFY', 'OP_QUERY', 'OP_STATUS', 'OP_UPDATE',
'PORT',
'AuthoritativeDomainError', 'DNSQueryTimeoutError', 'DomainError',
]
# System imports
import warnings
import struct, random, types, socket
from itertools import chain
from io import BytesIO
AF_INET6 = socket.AF_INET6
from zope.interface import implementer, Interface, Attribute
# Twisted imports
from twisted.internet import protocol, defer
from twisted.internet.error import CannotListenError
from twisted.python import log, failure
from twisted.python import _utilpy3 as tputil
from twisted.python import randbytes
from twisted.python.compat import _PY3, unicode, comparable, cmp, nativeString
if _PY3:
def _ord2bytes(ordinal):
"""
Construct a bytes object representing a single byte with the given
ordinal value.
@type ordinal: C{int}
@rtype: C{bytes}
"""
return bytes([ordinal])
def _nicebytes(bytes):
"""
Represent a mostly textful bytes object in a way suitable for presentation
to an end user.
@param bytes: The bytes to represent.
@rtype: C{str}
"""
return repr(bytes)[1:]
def _nicebyteslist(list):
"""
Represent a list of mostly textful bytes objects in a way suitable for
presentation to an end user.
@param list: The list of bytes to represent.
@rtype: C{str}
"""
return '[%s]' % (
', '.join([_nicebytes(b) for b in list]),)
else:
_ord2bytes = chr
_nicebytes = _nicebyteslist = repr
def randomSource():
"""
    Wrapper around L{randbytes.secureRandom} to return a random 16-bit
    integer unpacked from 2 random bytes.
"""
return struct.unpack('H', randbytes.secureRandom(2, fallback=True))[0]
PORT = 53
(A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT,
RP, AFSDB) = range(1, 19)
AAAA = 28
SRV = 33
NAPTR = 35
A6 = 38
DNAME = 39
SPF = 99
QUERY_TYPES = {
A: 'A',
NS: 'NS',
MD: 'MD',
MF: 'MF',
CNAME: 'CNAME',
SOA: 'SOA',
MB: 'MB',
MG: 'MG',
MR: 'MR',
NULL: 'NULL',
WKS: 'WKS',
PTR: 'PTR',
HINFO: 'HINFO',
MINFO: 'MINFO',
MX: 'MX',
TXT: 'TXT',
RP: 'RP',
AFSDB: 'AFSDB',
# 19 through 27? Eh, I'll get to 'em.
AAAA: 'AAAA',
SRV: 'SRV',
NAPTR: 'NAPTR',
A6: 'A6',
DNAME: 'DNAME',
SPF: 'SPF'
}
IXFR, AXFR, MAILB, MAILA, ALL_RECORDS = range(251, 256)
# "Extended" queries (Hey, half of these are deprecated, good job)
EXT_QUERIES = {
IXFR: 'IXFR',
AXFR: 'AXFR',
MAILB: 'MAILB',
MAILA: 'MAILA',
ALL_RECORDS: 'ALL_RECORDS'
}
REV_TYPES = dict([
(v, k) for (k, v) in chain(QUERY_TYPES.items(), EXT_QUERIES.items())
])
IN, CS, CH, HS = range(1, 5)
ANY = 255
QUERY_CLASSES = {
IN: 'IN',
CS: 'CS',
CH: 'CH',
HS: 'HS',
ANY: 'ANY'
}
REV_CLASSES = dict([
(v, k) for (k, v) in QUERY_CLASSES.items()
])
# Opcodes
OP_QUERY, OP_INVERSE, OP_STATUS = range(3)
OP_NOTIFY = 4 # RFC 1996
OP_UPDATE = 5 # RFC 2136
# Response Codes
OK, EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED = range(6)
class IRecord(Interface):
"""
    A single entry in a zone of authority.
"""
TYPE = Attribute("An indicator of what kind of record this is.")
# Backwards compatibility aliases - these should be deprecated or something I
# suppose. -exarkun
from twisted.names.error import DomainError, AuthoritativeDomainError
from twisted.names.error import DNSQueryTimeoutError
def str2time(s):
"""
Parse a string description of an interval into an integer number of seconds.
@param s: An interval definition constructed as an interval duration
followed by an interval unit. An interval duration is a base ten
representation of an integer. An interval unit is one of the following
        letters: S (seconds), M (minutes), H (hours), D (days), W (weeks), or Y
(years). For example: C{"3S"} indicates an interval of three seconds;
C{"5D"} indicates an interval of five days. Alternatively, C{s} may be
any non-string and it will be returned unmodified.
@type s: text string (C{str}) for parsing; anything else for passthrough.
@return: an C{int} giving the interval represented by the string C{s}, or
whatever C{s} is if it is not a string.
"""
suffixes = (
('S', 1), ('M', 60), ('H', 60 * 60), ('D', 60 * 60 * 24),
('W', 60 * 60 * 24 * 7), ('Y', 60 * 60 * 24 * 365)
)
if isinstance(s, str):
s = s.upper().strip()
for (suff, mult) in suffixes:
if s.endswith(suff):
return int(float(s[:-1]) * mult)
try:
s = int(s)
except ValueError:
raise ValueError("Invalid time interval specifier: " + s)
return s
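# Usage sketch, following the suffix table above:
#   str2time("3S") == 3           # three seconds
#   str2time("5D") == 432000      # five days
#   str2time(120) == 120          # non-strings pass through unchanged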
def readPrecisely(file, l):
buff = file.read(l)
if len(buff) < l:
raise EOFError
return buff
class IEncodable(Interface):
"""
Interface for something which can be encoded to and decoded
from a file object.
"""
def encode(strio, compDict = None):
"""
Write a representation of this object to the given
file object.
@type strio: File-like object
@param strio: The stream to which to write bytes
@type compDict: C{dict} or C{None}
@param compDict: A dictionary of backreference addresses that have
            already been written to this stream and that may be used for
compression.
"""
def decode(strio, length = None):
"""
Reconstruct an object from data read from the given
file object.
@type strio: File-like object
@param strio: The stream from which bytes may be read
@type length: C{int} or C{None}
@param length: The number of bytes in this RDATA field. Most
implementations can ignore this value. Only in the case of
records similar to TXT where the total length is in no way
encoded in the data is it necessary.
"""
@implementer(IEncodable)
class Charstr(object):
def __init__(self, string=b''):
if not isinstance(string, bytes):
raise ValueError("%r is not a byte string" % (string,))
self.string = string
def encode(self, strio, compDict=None):
"""
Encode this Character string into the appropriate byte format.
@type strio: file
@param strio: The byte representation of this Charstr will be written
to this file.
"""
string = self.string
ind = len(string)
strio.write(_ord2bytes(ind))
strio.write(string)
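    # Example (sketch): Charstr(b'ok').encode(buf) writes b'\x02ok', i.e. a
    # single length byte followed by the string itself.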
def decode(self, strio, length=None):
"""
Decode a byte string into this Charstr.
@type strio: file
@param strio: Bytes will be read from this file until the full string
is decoded.
|
asphalt-framework/asphalt-web
|
asphalt/web/websocket.py
|
Python
|
apache-2.0
| 2,282
| 0
|
from typing import Union
from asphalt.core.context import Context
from wsproto.connection import WSConnection, ConnectionType
from wsproto.events import ConnectionRequested, ConnectionClosed, DataReceived
from asphalt.web.api import AbstractEndpoint
from asphalt.web.request import HTTPRequest
from asphalt.web.servers.base import BaseHTTPClientConnection
class WebSocketEndpoint(AbstractEndpoint):
"""
Implements websocket endpoints.
Subprotocol negotiation is currently not supported.
"""
__slots__ = ('ctx', '_client', '_ws')
def __init__(self, ctx: Context, client: BaseHTTPClientConnection):
self.ctx = ctx
self._client = client
self._ws = WSConnection(ConnectionType.SERVER)
def _process_ws_events(self):
for event in self._ws.events():
if isinstance(event, ConnectionRequested):
self._ws.accept(event)
self.on_connect()
elif isinstance(event, DataReceived):
self.on_data(event.data)
elif isinstance(event, ConnectionClosed):
self.on_close()
bytes_to_send = self._ws.bytes_to_send()
if bytes_to_send:
self._client.write(bytes_to_send)
def begin_request(self, request: HTTPRequest):
trailing_data = self._client.upgrade()
self._ws.receive_bytes(trailing_data)
self._process_ws_events()
def receive_body_data(self, data: bytes) -> None:
self._ws.receive_bytes(data)
self._process_ws_events()
def send_message(self, payload: Union[str, bytes]) -> None:
"""
Send a message to the client.
:param payload: either a unicode string or a bytestring
"""
self._ws.send_data(payload)
bytes_to_send = self._ws.bytes_to_send()
self._client.write(bytes_to_send)
def close(self) -> None:
"""Close the websocket."""
self._ws.close()
self._process_ws_events()
def on_connect(self) -> None:
"""Called when the websocket handshake has been done."""
def on_close(self) -> None:
"""Called when the connection has been closed."""
def on_data(self, data: bytes) -> None:
"""Called when there is new data from the peer
|
."""
|
arenadata/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/package/scripts/ldap.py
|
Python
|
apache-2.0
| 1,729
| 0.004627
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
def _ldap_common():
import params
File(os.path.join(params.knox_conf_dir, 'ldap-log4j.properties'),
mode=params.mode,
group=params.knox_group,
owner=params.knox_user,
content=params.ldap_log4j
)
File(os.path.join(params.knox_conf_dir, 'users.ldif'),
mode=params.mode,
group=params.knox_group,
owner=params.knox_user,
content=params.users_ldif
)
#@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
#def ldap():
# import params
#
# # Manually overriding service logon user & password set by the installation package
# ServiceConfig(params.knox_ldap_win_service_name,
# action="change_user",
# username = params.knox_user,
# password = Script.get_password(params.knox_user))
#
# _ldap_common()
#@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def ldap():
_ldap_common()
|
ioO/billjobs
|
billjobs/migrations/0002_service_is_available.py
|
Python
|
mit
| 478
| 0.002092
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-20 16:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('billjobs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='service',
name='is_available',
            field=models.BooleanField(default=True, verbose_name='Is available ?'),
),
]
|
mordred-descriptor/mordred
|
mordred/GeometricalIndex.py
|
Python
|
bsd-3-clause
| 2,504
| 0.000399
|
from __future__ import division
from ._base import Descriptor
from ._graph_matrix import Radius3D as CRadius3D
from ._graph_matrix import Diameter3D as CDiameter3D
__all__ = ("Diameter3D", "Radius3D", "GeometricalShapeIndex", "PetitjeanIndex3D")
class GeometricalIndexBase(Descriptor):
__slots__ = ()
explicit_hydrogens = True
require_3D = True
@classmethod
def preset(cls, version):
yield cls()
def parameters(self):
return ()
rtype = float
class Radius3D(GeometricalIndexBase):
r"""geometric radius descriptor."""
since = "1.0.0"
__slots__ = ()
def description(self):
return "geometric radius"
def __str__(self):
return "GeomRadius"
def dependencies(self):
return {"R": CRadius3D(self.explicit_hydrogens)}
def calculate(self, R):
return R
class Diameter3D(GeometricalIndexBase):
r"""geometric diameter descriptor."""
since = "1.0.0"
__slots__ = ()
def description(self):
return "geometric diameter"
def __str__(self):
return "GeomDiameter"
def dependencies(self):
return {"D": CDiameter3D(self.explicit_hydrogens)}
def calculate(self, D):
        return D
class GeometricalShapeIndex(GeometricalIndexBase):
r"""ge
|
ometrical shape index descriptor.
.. math::
I_{\rm topo} = \frac{D - R}{R}
where
:math:`R` is geometric radius,
:math:`D` is geometric diameter.
:returns: NaN when :math:`R = 0`
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "geometrical shape index"
def __str__(self):
return "GeomShapeIndex"
def dependencies(self):
return {
"D": CDiameter3D(self.explicit_hydrogens),
"R": CRadius3D(self.explicit_hydrogens),
}
def calculate(self, R, D):
with self.rethrow_zerodiv():
return (D - R) / (R)
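    # Worked example (sketch): with geometric radius R = 2.0 and diameter
    # D = 3.5, the shape index is (3.5 - 2.0) / 2.0 = 0.75.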
class PetitjeanIndex3D(GeometricalShapeIndex):
r"""geometric Petitjean index descriptor.
.. math::
I_{\rm Petitjean} = \frac{D - R}{D}
where
:math:`R` is geometric radius,
:math:`D` is geometric diameter.
:returns: NaN when :math:`D = 0`
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "geometric Petitjean index"
def __str__(self):
return "GeomPetitjeanIndex"
def calculate(self, R, D):
with self.rethrow_zerodiv():
return (D - R) / (D)
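    # Worked example (sketch): for the same R = 2.0 and D = 3.5, the
    # Petitjean index is (3.5 - 2.0) / 3.5, approximately 0.429.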
|
rosenvladimirov/addons
|
printer_tray/__openerp__.py
|
Python
|
agpl-3.0
| 1,538
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Report to printer - Paper tray selection',
'version': '8.0.1.0.1',
'category': 'Printer',
'author': "Camptocamp,Odoo Community Association (OCA)",
'maintainer': 'Camptocamp',
'website': 'http://www.camptocamp.com/',
'license': 'AGPL-3',
'depends': ['base_report_to_printer',
],
'data': [
'users_view.xml',
'ir_report_view.xml',
'printer_view.xml',
'report_xml_action_view.xml',
'security/ir.model.access.csv',
],
'external_dependencies': {
'python': ['cups'],
},
'installable': True,
'auto_install': False,
'application': True,
}
|
wtpayne/hiai
|
a3_src/h70_internal/da/report/html_builder.py
|
Python
|
apache-2.0
| 2,607
| 0.003836
|
# -*- coding: utf-8 -*-
"""
Module for the generation of docx format documents.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import jinja2
# -----------------------------------------------------------------------------
def build(_, section_list, filepath):
"""
Build and save the specified document.
"""
environment = jinja2.Environment(
loader = jinja2.PackageLoader(
'da.report', 'templates'),
trim_blocks = True,
lstrip_blocks = True)
template = environment.get_template('engineering_document.template.html')
# Filter out empty sections
filtered_list = []
for section in section_list:
if section['level'] != 1 and len(section['para']) == 0:
continue
filtered_list.append(section)
html = template.render( # pylint: disable=E1101
section_list = filtered_list)
with open(filepath, 'wt') as file:
file.write(html)
# _add_title_section(document, doc_data['_metadata'])
# _add_toc_section(document)
# for item in sorted(_generate_content_items(doc_data),
#                       key = _doc_data_sortkey):
# if item['section_level'] == 1:
# _add_content_section(document)
# if 0 < len(item['paragraph_list']):
# _add_content_para(document,
# level = item['section_level'],
# title = item['section_title'],
# type = item['section_type'],
# content = item['paragraph_list'])
# else:
# print('Skipping section: ' + item['section_title'])
# # Save the document.
# da.util.ensure_dir_exists(os.path.dirname(filepath))
# document.save(filepath)
|
tuturto/pyherc
|
src/pyherc/rules/constants.py
|
Python
|
mit
| 1,438
| 0.002086
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for various constants
"""
PIERCING_DAMAGE = 'piercing'
CRUSHING_DAMAGE = 'crushing'
SLASHING_DAMAGE = 'slashing'
ELEMENTAL_DAMAGE = 'elemental'
DARK_DAMAGE = 'dark'
LIGHT_DAMAGE = 'light'
POISON_DAMAGE = 'poison'
INSTANT_ACTION = 1
FAST_ACTION = 2
NORMAL_ACTION = 4
SLOW_ACTION = 8
LONG_ACTION = 16
|
akosel/incubator-airflow
|
tests/contrib/hooks/test_jira_hook.py
|
Python
|
apache-2.0
| 1,861
| 0
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
            models.Connection(
conn_id='jira_default', conn_type='jira',
host='https://localhost/jira/', port=443,
extra='{"verify": "False", "project": "AIRFLOW"}'))
@patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
return_value=jira_client_mock)
def test_jira_client_connection(self, jira_mock):
jira_hook = JiraHook()
self.assertTrue(jira_mock.called)
self.assertIsInstance(jira_hook.client, Mock)
self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
|
FinnStutzenstein/OpenSlides
|
server/openslides/saml/urls.py
|
Python
|
mit
| 229
| 0
|
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
url(r"^$", csrf_exempt(views.SamlView.as_view())),
url(r"^metadata/$", views.ser
|
ve_metadata),
]
|
enthought/etsproxy
|
enthought/mayavi/modules/slice_unstructured_grid.py
|
Python
|
bsd-3-clause
| 107
| 0
|
# proxy module
from __future__ import absolute_import
from mayavi.modules.slice_unstructured_grid import *
|
sfu-ireceptor/gateway
|
resources/agave_apps/genoa/genoa.py
|
Python
|
lgpl-3.0
| 88
| 0
|
import sys
if __name__ == "__main__":
    print("Genoa python script")
    sys.exit(0)
|
Jajcus/pyxmpp
|
examples/c2s_test.py
|
Python
|
lgpl-2.1
| 1,349
| 0.014837
|
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
import libxml2
import time
import traceback
import sys
import logging
from pyxmpp.all import JID,Iq,Presence,Message,StreamError
from pyxmpp.jabber.all import Client
class Disconnected(Exception):
pass
class MyClient(Client):
def session_started(self):
self.stream.send(Presence())
def idle(self):
print "idle"
Client.idle(self)
if self.session_established:
target=JID("jajcus",s.jid.domain)
self.stream.send(Message(to_jid=target,body=unicode("Teścik","utf-8")))
    def post_disconnect(self):
print "Disconnected"
raise Disconnected
logger=logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
libxml2.debugMemory(1)
print "creating stream..."
s=MyClient(jid=JID("test@localhost/Test"),password=u"123",auth_methods=["sasl:DIGEST-MD5","digest"])
print "connecting..."
s.connect()
print "processing..."
try:
try:
s.loop(1)
finally:
s.disconnect()
except KeyboardInterrupt:
traceback.print_exc(file=sys.stderr)
except (StreamError,Disconnected),e:
raise
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
# vi: sts=4 et sw=4
|
BeenzSyed/tempest
|
tempest/api/compute/floating_ips/test_floating_ips_actions.py
|
Python
|
apache-2.0
| 5,488
| 0.000182
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute.floating_ips import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
class FloatingIPsTestJSON(base.BaseFloatingIPsTest):
_interface = 'json'
server_id = None
floating_ip = None
@classmethod
def setUpClass(cls):
super(FloatingIPsTestJSON, cls).setUpClass()
cls.client = cls.floating_ips_client
#cls.servers_client = cls.servers_client
# Server creation
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
# Floating IP creation
resp, body = cls.client.create_floating_ip()
cls.floating_ip_id = body['id']
cls.floating_ip = body['ip']
@classmethod
def tearDownClass(cls):
# Deleting the floating IP which is created in this method
resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
super(FloatingIPsTestJSON, cls).tearDownClass()
@attr(type='gate')
def test_allocate_floating_ip(self):
# Positive test:Allocation of a new floating IP to a project
# should be successful
resp, body = self.client.create_floating_ip()
floating_ip_id_allocated = body['id']
self.addCleanup(self.client.delete_floating_ip,
floating_ip_id_allocated)
self.assertEqual(200, resp.status)
resp, floating_ip_details = \
self.client.get_floating_ip_details(floating_ip_id_allocated)
# Checking if the details of allocated IP is in list of floating IP
resp, body = self.client.list_floating_ips()
self.assertIn(floating_ip_details, body)
@attr(type='gate')
def test_delete_floating_ip(self):
# Positive test:Deletion of valid floating IP from project
# should be successful
# Creating the floating IP that is to be deleted in this method
resp, floating_ip_body = self.client.create_floating_ip()
# Storing the details of floating IP before deleting it
cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
resp, floating_ip_details = cli_resp
# Deleting the floating IP from the project
resp, body = self.client.delete_floating_ip(floating_ip_body['id'])
self.assertEqual(202, resp.status)
# Check it was really deleted.
self.client.wait_for_resource_deletion(floating_ip_body['id'])
@attr(type='gate')
def test_associate_disassociate_floating_ip(self):
# Positive test:Associate and disassociate the provided floating IP
# to a specific server should be successful
# Association of floating IP to fixed IP address
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
# Disassociation of floating IP that was associated in this method
resp, body = self.client.disassociate_floating_ip_from_server(
self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
@attr(type='gate')
def test_associate_already_associated_floating_ip(self):
# positive test:Association of an already associated floating IP
# to specific server should change the association of the Floating IP
# Create server so as to use for Multiple association
        new_name = rand_name('floating_server')
resp, body = self.servers_client.create_server(new_name,
self.image_ref,
self.flavor_ref)
self.servers_client.wait_for_server_status(body['id'], 'ACTIVE')
self.new_server_id = body['id']
# Associating floating IP for the first time
resp, _ = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
# Associating floating IP for the second time
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.new_server_id)
self.addCleanup(self.servers_client.delete_server, self.new_server_id)
if (resp['status'] is not None):
self.addCleanup(self.client.disassociate_floating_ip_from_server,
self.floating_ip,
self.new_server_id)
# Make sure no longer associated with old server
self.assertRaises((exceptions.NotFound,
exceptions.UnprocessableEntity),
self.client.disassociate_floating_ip_from_server,
self.floating_ip, self.server_id)
class FloatingIPsTestXML(FloatingIPsTestJSON):
_interface = 'xml'
|
quaddra/engage
|
python_pkg/engage/drivers/genforma/drivertest_celery.py
|
Python
|
apache-2.0
| 2,355
| 0.001274
|
resource_id = "celery-1"
_install_script = """
[ { "id": "celery-1",
"key": {"name": "Celery", "version": "2.3"},
"config_port": {
"password": "engage_129",
"username": "engage_celery",
"vhost": "engage_celery_vhost"
},
"input_ports": {
"broker": {
"BROKER_HOST": "${hostname}",
"BROKER_PORT": "5672",
"broker": "rabbitmqctl"
},
"host": {
"cpu_arch": "x86_64",
"genforma_home": "${deployment_home}",
"hostname": "${hostname}",
"log_directory": "${deployment_home}/log",
"os_type": "mac-osx",
"os_
|
user_name": "${username}",
"private_ip": null,
"sudo_password": "GenForma/${username}/sudo_password"
},
"pip": {
"pipbin": "${deployment_home}/python/bin/pip"
},
"python": {
"PYTHONPATH": "${deployment_home}/python/lib/python2.7/site-packages/",
"home": "${deployment_home}/python/bin/python",
"python_bin_dir": "${deployment_home}/python/bin
|
",
"type": "python",
"version": "2.7"
},
"setuptools": {
"easy_install": "${deployment_home}/python/bin/easy_install"
}
},
"output_ports": {
"celery": {
"broker": "rabbitmqctl",
"password": "engage_129",
"username": "engage_celery",
"vhost": "engage_celery_vhost"
}
},
"inside": {
"id": "${hostname}",
"key": {"name": "mac-osx", "version": "10.6"},
"port_mapping": {
"host": "host"
}
},
"environment": [
{
"id": "rabbitmq-1",
"key": {"name": "rabbitmq", "version": "2.4"},
"port_mapping": {
"broker": "broker"
}
},
{
"id": "python-1",
"key": {"name": "python", "version": "2.7"},
"port_mapping": {
"python": "python"
}
},
{
"id": "__GF_inst_2",
"key": {"name": "pip", "version": "any"},
"port_mapping": {
"pip": "pip"
}
},
{
"id": "setuptools-1",
"key": {"name": "setuptools", "version": "0.6"},
"port_mapping": {
"setuptools": "setuptools"
}
}
]
}
]
"""
def get_install_script():
return _install_script
def get_password_data():
return {}
|
wackou/smewt
|
smewt/actions.py
|
Python
|
gpl-3.0
| 5,613
| 0.004276
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2008-2013 Nicolas Wack <wackou@smewt.com>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from smewt.base.utils import tolist, toresult
from smewt.base.textutils import u
from smewt.base.subtitletask import SubtitleTask
from smewt.plugins import mplayer
from guessit.language import Language
import smewt
import os, sys, time
import subprocess
import logging
log = logging.getLogger(__name__)
def get_episodes_and_subs(language, series, season=None):
if season:
episodes = set(ep for ep in tolist(series.episodes) if ep.season == int(season))
else:
episodes = set(tolist(series.episodes))
subs = []
for ep in episodes:
subs.extend(tolist(ep.get('subtitles')))
return episodes, subs
def get_subtitles(media_type, title, season=None, language=None):
db = smewt.SMEWTD_INSTANCE.database
language = language or db.config.get('subtitleLanguage') or 'en'
if media_type == 'episode':
series = db.find_one('Series', title=title)
episodes, subs = get_episodes_and_subs(language, series, season)
already_good = set(s.metadata for s in subs)
episodes = episodes - already_good
if episodes:
subtask = SubtitleTask(episodes, language)
smewt.SMEWTD_INSTANCE.taskManager.add(subtask)
return 'OK'
else:
msg = 'All episodes already have %s subtitles!' % Language(language).english_name
log.info(msg)
return msg
elif media_type == 'movie':
movie = db.find_one('Movie', title=title)
# check if we already have it
for sub in tolist(movie.get('subtitles')):
if sub.language == language:
msg = 'Movie already has a %s subtitle' % Language(language).english_name
log.info(msg)
return msg
subtask = SubtitleTask(movie, language)
smewt.SMEWTD_INSTANCE.taskManager.add(subtask)
return 'OK'
else:
msg = 'Don\'t know how to fetch subtitles for type: %s' % media_type
log.error(msg)
return msg
def _play(files, subs):
# launch external player
args = files
# make sure subs is as long as args so as to not cut it when zipping them together
subs = subs + [None] * (len(files) - len(subs))
if mplayer.variant != 'undefined':
# if we have mplayer (or one of its variant) installed, use it with
# subtitles support
opts = []
return mplayer.play(files, subs, opts)
elif sys.platform == 'linux2':
action = 'xdg-open'
# FIXME: xdg-open only accepts 1 argument, this will break movies split in multiple files...
args = args[:1]
# if we have smplayer installed, use it with subtitles support
if os.system('which smplayer') == 0:
action = 'smplayer'
args = [ '-fullscreen', '-close-at-end' ]
for video, subfile in zip(files, subs):
args.append(video)
if subfile:
args += [ '-sub', subfile ]
elif sys.platform == 'darwin':
action = 'open'
elif sys.platform == 'win32':
action = 'open'
log.info('launching %s with args = %s' % (action, str(args)))
subprocess.call([action]+args)
def play_video(metadata, sublang=None):
# FIXME: this should be handled properly with media player plugins
# files should be a list of (Metadata, sub), where sub is possibly None
# then we would look into the available graphs where such a Metadata has files,
# and choose the one on the fastest media (ie: local before nfs before tcp)
# it should also choose subtitles the same way, so we could even imagine reading
# the video from one location and the subs from another
# find list of all files to be played
# returns a list of (video_filename, sub_filename)
if sublang:
msg = 'Playing %s with %s subtitles' % (metadata, Language(sublang).english_name)
else:
msg = 'Playing %s with no subtitles' % metadata
log.info(u(msg))
# FIXME: we assume that sorting alphanumerically is good enough, but that is
# not necessarily the case...
# we should also look whether the file also has the 'cdNumber' attribute
files = tolist(metadata.get('files'))
files = sorted(files, key=lambda f: f.get('filename'))
if sublang is not None:
sublang = Language(sublang)
for sub in tolist(metadata.get('subtitles')):
if sub.language == sublang:
subs = sorted(tolist(sub.get('files')), key=lambda f: f.get('filename'))
break
else:
subs = [None]*len(files)
# update last viewed info
metadata.lastViewed = time.time()
metadata.watched = True
_play([ f.filename for f in files],
[ s.filename for s in subs if s ])
def play_file(filename):
_play([filename], [None])
|
mitodl/odl-video-service
|
ui/migrations/0011_collection_created_at.py
|
Python
|
bsd-3-clause
| 602
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-12 19:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("ui", "0010_video_default_sort"),
]
operations = [
migrations.AddField(
model_name="collection",
name="created_at",
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterModelOptions(
name="collection",
options={"ordering": ["-created_at"]}
|
,
),
]
|
jiobert/python
|
Paracha_Junaid/Assignments/Python/Web_fund/stars.py
|
Python
|
mit
| 368
| 0.048913
|
# x = [4, 6, 1, 3, 5, 7, 25]
# def stars (a):
# i = 0
# while (i < len(a)):
# print '*' * a[i]
# i += 1
# stars(x)
x = [4, "Tom", 1, "Michael", 5, 7, "Jimmy Smith"]
def stars (a):
i = 0
while (i < len(a)):
        if type(a[i]) is int:
            print '*' * a[i]
i+=1
else:
temp = a[i]
temp = temp.lower()
print (len(a[i])) * temp[0]
i += 1
stars(x)
|
Teekuningas/mne-python
|
mne/utils/tests/test_progressbar.py
|
Python
|
bsd-3-clause
| 3,513
| 0
|
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.parallel import parallel_func
from mne.utils import ProgressBar, array_split_idx, use_log_level
def test_progressbar():
"""Test progressbar class."""
a = np.arange(10)
pbar = ProgressBar(a)
assert a is pbar.iterable
assert pbar.max_value == 10
pbar = ProgressBar(10)
assert pbar.max_value == 10
assert pbar.iterable is None
# Make sure that non-iterable input raises an error
def iter_func(a):
for ii in a:
pass
pytest.raises(Exception, iter_func, ProgressBar(20))
def _identity(x):
return x
def test_progressbar_parallel_basic(capsys):
"""Test ProgressBar with parallel computing, basic version."""
assert capsys.readouterr().out == ''
parallel, p_fun, _ = parallel_func(_identity, total=10, n_jobs=1,
verbose=True)
with use_log_level(True):
out = parallel(p_fun(x) for x in range(10))
assert out == list(range(10))
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block(x, pb):
for ii in range(len(x)):
pb.update(ii + 1)
return x
def test_progressbar_parallel_advanced(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr)) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(arr, 2))
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=10).sum()
assert sum_ == len(arr)
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
out = np.concatenate(out)
assert_array_equal(out, arr)
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block_wide(x, pb):
for ii in range(len(x)):
for jj in range(2):
pb.update(ii * 2 + jj + 1)
return x, pb.idx
def test_progressbar_parallel_more(capsys):
"""Test ProgressBar with parallel computing, advanced version
|
."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
    parallel, p_fun, _ = parallel_func(_identity_block_wide, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr) * 2) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(
arr, 2, n_per_split=2))
idxs = np.concatenate([o[1] for o in out])
assert_array_equal(idxs, np.arange(len(arr) * 2))
out = np.concatenate([o[0] for o in out])
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=len(arr) * 2).sum()
assert sum_ == len(arr) * 2
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
|
yubang/smallMonitor
|
lib/disk.py
|
Python
|
apache-2.0
| 903
| 0.025094
|
#coding:UTF-8
"""
Disk monitoring module
"""
from config import disk
from lib import core
import os, re, time
def init():
"对外接口"
sign=True
for t in disk.DISK_PATH:
warn,data=check(t)
if not warn:
            login_time = time.time()
            message = "Disk monitor alert: disk usage exceeds %s" % (disk.DISK_USED) + "%\nCheck result: " + data
            message = message.decode("UTF-8")
            print message
            core.sendEmail(message)
            print u"Email has been sent"
sign=False
return sign
def getIntervalTime():
"获取检测间隔时间"
return disk.DISK_DELAY
def check(path):
"检测是否超出预警"
r=os.popen("df -h "+path)
for line in r:
data=line.rstrip()
datas=re.split(r'\s+',data)
used=datas[4].encode("UTF-8").replace("%","")
return int(used) < disk.DISK_USED,data
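# Example (sketch): for a "df -h" data line ending in "... 80% /data",
# datas[4] is "80%", so used == 80 and the check alerts whenever
# disk.DISK_USED <= 80.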
|
endlessm/chromium-browser
|
third_party/llvm/lldb/examples/customization/pwd-cd-and-system/utils.py
|
Python
|
bsd-3-clause
| 1,600
| 0
|
"""Utility for changing directories and execution of commands in a subshell."""
from __future__ import print_function
import os
import shlex
import subprocess
# Store the previous working directory for the 'cd -' command.
class Holder:
"""Holds the _prev_dir_ class attribute for chdir() function."""
_prev_dir_ = None
@classmethod
def prev_dir(cls):
return cls._prev_dir_
@classmethod
def swap(cls, dir):
cls._prev_dir_ = dir
def chdir(debugger, args, result, dict):
"""Change the working directory, or cd to ${HOME}.
    You can also issue 'cd -' to change to the previous working directory."""
new_dir = args.strip()
if not new_dir:
new_dir = os.path.expanduser('~')
elif new_dir == '-':
if not Holder.prev_dir():
# Bad directory, not changing.
print("bad directory, not changing")
return
else:
new_dir = Holder.prev_dir()
Holder.swap(os.getcwd())
os.chdir(new_dir)
print("Current working directory: %s" % os.getcwd())
def system(debugger, command_line, result, dict):
"""Execute the command (a string) in a subshell."""
args = shlex.split(command_line)
process = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
retcode = process.poll()
if output and error:
print("stdout=>\n", output)
print("stderr=>\n", error)
elif output:
print(output)
elif error:
print(error)
print("retcode:", retcode)
|
srirajan/lakkucast
|
lakkucast.py
|
Python
|
apache-2.0
| 21,535
| 0.014442
|
#!/usr/bin/python
#credits : https://gist.github.com/TheCrazyT/11263599
import socket
import ssl
import select
import time
import re
import sys
from thread import start_new_thread
from struct import pack
from random import randint
from subprocess import call
import os
import fnmatch
import argparse
import logging
class lakkucast:
def __init__(self):
self.status = None
self.session_id = None
self.protocolVersion = 0
self.source_id = "sender-0"
self.destination_id = "receiver-0"
self.chromecast_server = "192.168.1.23" #living room audio
self.socket = 0
self.type_enum = 0
self.type_string = 2
self.type_bytes = self.type_string
self.session = 0
self.play_state = None
self.sleep_between_media = 5
self.content_id = None
self.socket_fail_count = 100
def clean(self,s):
return re.sub(r'[\x00-\x1F\x7F]', '?',s)
def getType(self, fieldId,t):
return (fieldId << 3) | t
def getLenOf(self, s):
x = ""
l = len(s)
while(l > 0x7F):
x += pack("B",l & 0x7F | 0x80)
l >>= 7
x += pack("B",l & 0x7F)
return x
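    # Hand-checked examples for the two framing helpers above, which follow
    # the protobuf wire format used by Cast v2 messages (values computed by
    # hand, not taken from the original author):
    #   getType(2, self.type_string) -> (2 << 3) | 2 == 0x12  (field 2, length-delimited)
    #   getLenOf("x" * 300)          -> "\xac\x02"            (varint encoding of 300)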
def init_status(self):
self.socket = socket.socket()
self.socket = ssl.wrap_socket(self.socket)
#print "connecting ..."
self.socket.connect((self.chromecast_server,8009))
payloadType = 0 #0=string
data = "{\"type\":\"CONNECT\",\"origin\":{}}"
lnData = self.getLenOf(data)
#print len(lnData),len(data),lnData.encode("hex")
namespace = "urn:x-cast:com.google.cast.tp.connection"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "Connecting ..."
self.socket.write(msg)
payloadType = 0 #0=string
data = "{\"type\":\"GET_STATUS\",\"requestId\":46479000}"
lnData = self.getLenOf(data)
namespace = "urn:x-cast:com.google.cast.receiver"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" % (len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print "sending status request..."
self.socket.write(msg)
m1=None
m3=None
result=""
count = 0
while m1==None and m3==None:
lastresult = self.socket.read(2048)
result += lastresult
#print "#"+lastresult.encode("hex")
#if lastresult != "":
# print self.clean("\nH!"+lastresult)
#print result
m1 = re.search('"sessionId":"(?P<session>[^"]+)"', result)
m2 = re.search('"statusText":"(?P<status>[^"]+)"', result)
m3 = re.search('"playerState":"(?P<play_state>[^"]+)"', result)
m4 = re.search('"contentId":"(?P<content_id>[^"]+)"', result)
count = count + 1
if count > self.socket_fail_count:
self.status = None
self.play_state = None
self.status = None
break
#print "#%i" % (m==None)
if m1 != None:
#print "session:",m1.group("session")
self.session = m1.group("session")
if m2 != None:
#print "status:",m2.group("status")
self.status = m2.group("status")
if m3 != None:
#print "play_state:",m3.group("play_state")
self.play_state = m3.group("play_state")
if m4 != None:
#print "contentid:",m4.group("content_id")
self.content_id = m4.group("content_id")
payloadType = 0 #0=string
data = "{MESSAGE_TYPE: 'SET_VOLUME','volume': {'level': 0.2}}"
lnData = self.getLenOf(data)
#print len(lnData),len(data),lnData.encode("hex")
namespace = "urn:x-cast:com.google.cast.tp.connection"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "Connecting ..."
self.socket.write(msg)
def get_status(self):
return " ".join(["main_status:" , self.get_main_status() , "play_status:" , self.get_play_status()])
def get_main_status(self):
if self.status == None:
status_str = "None"
else:
status_str = self.status
return (status_str)
def get_play_status(self):
if self.play_state == None:
play_state_str = "None"
else:
play_state_str = self.play_state
return (play_state_str)
def ready_to_play(self):
if self.status == "Now Casting":
return False
if self.status == "Ready To Cast" or self.status == None or self.status == "Chromecast Home Screen":
if self.play_state == None:
return True
if self.play_state == "IDLE":
return True
if s
|
biomodels/BIOMD0000000155
|
BIOMD0000000155/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000155.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
|
yangke/cluehunter
|
test/Test_objdump_addr.py
|
Python
|
gpl-3.0
| 852
| 0.023474
|
'''
Created on Oct 29, 2015
@author: yangke
'''
from model.TaintVar import TaintVar
from TraceTrackTest import TraceTrackTest
class Test_objdump_addr:
def test(self):
passed_message="BINUTILS-2.23 'addr[1]' TEST PASSED!"
not_pass_message="ERRORS FOUND IN BINUTILS-2.23 'addr[1]' TEST!"
answer_path='answers/binutils/binutils-2.23/objdump/'
name='binutils-2.23_objdump_addr'
logfile_path="gdb_logs/binutils-2.23/binutils-2.23_objdump_gdb.txt"
        c_proj_path="gdb_logs/binutils-2.23/binutils-2.23"
taintVars=[TaintVar("addr",['*'])]
test=TraceTrackTest(answer_path,name,logfile_path,taintVars,passed_message,not_pass_message)
test.set_c_proj_path(c_proj_path)
passed=test.test()
return passed
if __name__ == '__main__':
    test=Test_objdump_addr()
test.test()
|
toontownfunserver/Panda3D-1.9.0
|
direct/tkwidgets/Floater.py
|
Python
|
bsd-3-clause
| 13,935
| 0.008396
|
"""
Floater Class: Velocity style controller for floating point values with
a label, entry (validated), and scale
"""
__all__ = ['Floater', 'FloaterWidget', 'FloaterGroup']
from direct.showbase.TkGlobal import *
from Tkinter import *
from Valuator import Valuator, VALUATOR_MINI, VALUATOR_FULL
from direct.task import Task
import math, sys, string, Pmw
FLOATER_WIDTH = 22
FLOATER_HEIGHT = 18
class Floater(Valuator):
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('style', VALUATOR_MINI, INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialize the superclass
Valuator.__init__(self, parent)
self.initialiseoptions(Floater)
def createValuator(self):
self._valuator = self.createcomponent('valuator',
(('floater', 'valuator'),),
None,
FloaterWidget,
(self.interior(),),
command = self.setEntry,
value = self['value'])
self._valuator._widget.bind('<Double-ButtonPress-1>', self.mouseReset)
def packValuator(self):
# Position components
if self._label:
self._label.grid(row=0, column=0, sticky = EW)
self._entry.grid(row=0, column=1, sticky = EW)
self._valuator.grid(row=0, column=2, padx = 2, pady = 2)
self.interior().columnconfigure(0, weight = 1)
class FloaterWidget(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
# Appearance
('width', FLOATER_WIDTH, INITOPT),
('height', FLOATER_HEIGHT, INITOPT),
('relief', RAISED, self.setRelief),
('borderwidth', 2, self.setBorderwidth),
('background', 'grey75', self.setBackground),
# Behavior
# Initial value of floater, use self.set to change value
('value', 0.0, INITOPT),
('numDigits', 2, self.setNumDigits),
# Command to execute on floater updates
('command', None, None),
# Extra data to be passed to command function
('commandData', [], None),
# Callback's to execute during mouse interaction
('preCallback', None, None),
('postCallback', None, None),
# Extra data to be passed to callback function, needs to be a list
('callbackData', [], None),
)
self.defineoptions(kw, optiondefs)
# Initialize the superclass
Pmw.MegaWidget.__init__(self, parent)
# Set up some local and instance variables
# Create the components
interior = self.interior()
# Current value
self.value = self['value']
# The canvas
width = self['width']
height = self['height']
self._widget = self.createcomponent('canvas', (), None,
Canvas, (interior,),
width = width,
height = height,
background = self['background'],
highlightthickness = 0,
scrollregion = (-width/2.0,
-height/2.0,
width/2.0,
height/2.0))
self._widget.pack(expand = 1, fill = BOTH)
# The floater icon
self._widget.create_polygon(-width/2.0, 0, -2.0, -height/2.0,
-2.0, height/2.0,
fill = 'grey50',
tags = ('floater',))
self._widget.create_polygon(width/2.0, 0, 2.0, height/2.0,
2.0, -height/2.0,
fill = 'grey50',
tags = ('floater',))
# Add event bindings
self._widget.bind('<ButtonPress-1>', self.mouseDown)
self._widget.bind('<B1-Motion>', self.updateFloaterSF)
self._widget.bind('<ButtonRelease-1>', self.mouseUp)
self._widget.bind('<Enter>', self.highlightWidget)
self._widget.bind('<Leave>', self.restoreWidget)
# Make sure input variables processed
self.initialiseoptions(FloaterWidget)
def set(self, value, fCommand = 1):
"""
self.set(value, fCommand = 1)
Set floater to new value, execute command if fCommand == 1
"""
# Send command if any
if fCommand and (self['command'] != None):
apply(self['command'], [value] + self['commandData'])
# Record value
self.value = value
def updateIndicator(self, value):
# Nothing visible to update on this type of widget
pass
def get(self):
"""
self.get()
Get current floater value
"""
return self.value
## Canvas callback functions
# Floater velocity controller
def mouseDown(self, event):
""" Begin mouse interaction """
# Exectute user redefinable callback function (if any)
self['relief'] = SUNKEN
if self['preCallback']:
apply(self['preCallback'], self['callbackData'])
self.velocitySF = 0.0
self.updateTask = taskMgr.add(self.updateFloaterTask,
'updateFloater')
self.updateTask.lastTime = globalClock.getFrameTime()
def updateFloaterTask(self, state):
"""
Update floaterWidget value based on current scaleFactor
Adjust for time to compensate for fluctuating frame rates
"""
currT = globalClock.getFrameTime()
dt = currT - state.lastTime
self.set(self.value + self.velocitySF * dt)
state.lastTime = currT
return Task.cont
def updateFloaterSF(self, event):
"""
Update velocity scale factor based of mouse distance from origin
"""
x = self._widget.canvasx(event.x)
y = self._widget.canvasy(event.y)
offset = max(0, abs(x) - Valuator.deadband)
if offset == 0:
return 0
sf = math.pow(Valuator.sfBase,
self.minExp + offset/Valuator.sfDist)
if x > 0:
self.velocitySF = sf
else:
self.velocitySF = -sf
def mouseUp(self, event):
taskMgr.remove(self.updateTask)
self.velocitySF = 0.0
# Execute user redefinable callback function (if any)
if self['postCallback']:
apply(self['postCallback'], self['callbackData'])
self['relief'] = RAISED
def setNumDigits(self):
"""
Adjust minimum exponent to use in velocity task based
upon the number of digits to be displayed in the result
"""
self.minExp = math.floor(-self['numDigits']/
math.log10(Valuator.sfBase))
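        # Worked example, assuming (hypothetically) Valuator.sfBase == 10:
        # numDigits = 2 gives minExp = floor(-2 / log10(10)) = -2, so the
        # slowest velocity steps are on the order of 10**-2 = 0.01.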
# Methods to modify floater characteristics
def setRelief(self):
self.interior()['relief'] = self['relief']
def setBorderwidth(self):
self.interior()['borderwidth'] = self['borderwidth']
def setBackground(self):
self._widget['background'] = self['background']
def highlightWidget(self, event):
self._widget.itemconfigure('floater', fill = 'black')
def restoreWidget(self, event):
self._widget.itemconfigure('floater', fill = 'grey50')
class FloaterGroup(Pmw.MegaToplevel):
def __init__(self, parent = None, **k
|
mastizada/pontoon
|
pontoon/api/schema.py
|
Python
|
bsd-3-clause
| 3,806
| 0.000263
|
import graphene
from graphene_django import DjangoObjectType
from graphene_django.debug import DjangoDebug
from pontoon.api.util import get_fields
from pontoon.base.models import (
Project as ProjectModel,
Locale as LocaleModel,
ProjectLocale as ProjectLocaleModel
)
class Stats(graphene.AbstractType):
missing_strings = graphene.Int()
complete = graphene.Boolean()
class ProjectLocale(DjangoObjectType, Stats):
class Meta:
model = ProjectLocaleModel
only_fields = (
'total_strings', 'approved_strings', 'translated_strings',
'fuzzy_strings', 'project', 'locale'
)
class Project(DjangoObjectType, Stats):
class Meta:
model = ProjectModel
only_fields = (
'name', 'slug', 'disabled', 'info', 'deadline', 'priority',
'contact', 'total_strings', 'approved_strings',
'translated_strings', 'fuzzy_strings'
)
localizations = graphene.List(ProjectLocale)
@graphene.resolve_only_args
def resolve_localizations(obj):
return obj.project_locale.all()
class Locale(DjangoObjectType, Stats):
class Meta:
model = LocaleModel
only_fields = (
'name', 'code', 'direction', 'script', 'population',
'total_strings', 'approved_strings', 'translated_strings',
'fuzzy_strings'
)
localizations = graphene.List(ProjectLocale, include_disabled=graphene.Boolean(False))
@graphene.resolve_only_args
def resolve_localizations(obj, include_disabled):
qs = obj.project_locale
if include_disabled:
return qs.all()
return qs.filter(project__disabled=False)
class Query(graphene.ObjectType):
debug = graphene.Field(DjangoDebug, name='__debug')
# include_disabled=True will return both active and disabled projects.
projects = graphene.List(Project, include_disabled=graphene.Boolean(False))
project = graphene.Field(Project, slug=graphene.String())
locales = graphene.List(Locale)
locale = graphene.Field(Locale, code=graphene.String())
def resolve_projects(obj, args, context, info):
qs = ProjectModel.objects
fields = get_fields(info)
if 'projects.localizations' in fields:
qs = qs.prefetch_related('project_locale__locale')
if 'projects.localizations.locale.localizations' in fields:
raise Exception('Cyclic queries are forbidden')
if args['include_disabled']:
return qs.all()
return qs.filter(disabled=False)
def resolve_project(obj, args, context, info):
qs = ProjectModel.objects
fields = get_fields(info)
if 'project.localizations' in fields:
qs = qs.prefetch_related('project_locale__locale')
if 'project.localizations.locale.localizations' in fields:
raise Exception('Cyclic queries are forbidden')
return qs.get(slug=args['slug'])
def resolve_locales(obj, args, context, info):
qs = LocaleModel.objects
fields = get_fields(info)
if 'locales.localizations' in fields:
qs = qs.prefetch_related('project_locale__project')
if 'locales.localizations.project.localizations' in fields:
            raise Exception('Cyclic queries are forbidden')
return qs.all()
def resolve_locale(obj, args, context, info):
qs = LocaleModel.objects
fields = get_fields(info)
if 'locale.localizations' in fields:
qs = qs.prefetch_related('project_locale__project')
if 'locale.localizations.project.localizations' in fields:
raise Exception('Cyclic queries are forbidden')
        return qs.get(code=args['code'])
schema = graphene.Schema(query=Query)
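# A minimal sketch of querying this schema in-process (assumes graphene's
# standard Schema.execute() API; the field names come from Query above):
#
#   result = schema.execute('{ projects { name, slug } }')
#   if not result.errors:
#       print(result.data['projects'])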
|
gabrielsaldana/sqmc
|
sabesqmc/quote/tests/test_forms.py
|
Python
|
agpl-3.0
| 2,384
| 0.001258
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.test import TestCase
from ..forms import QuoteForm
class TestQuoteForm(TestCase):
def setUp(self):
pass
def test_validate_emtpy_quote(self):
form = QuoteForm({'message': ''})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': ' '})
self.assertFalse(form.is_valid())
def test_validate_invalid_quote(self):
form = QuoteForm({'message': 'Mensaje invalido'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'mensaje invalido'})
self.assertFalse(form.is_valid())
        form = QuoteForm({'message': 'me nsaje invalido'})
self.assertFalse(form.is_valid())
def test_urls_in_quote(self):
form = QuoteForm({'message': 'http://122.33.43.322'})
self.assertFalse(form.is_valid())
        form = QuoteForm({'message': 'Me caga http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'http://sabesquemecaga.com/asdfads/'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga http://www.sabesquemecaga.com/test/12'})
self.assertFalse(form.is_valid())
def test_emails_in_quote(self):
form = QuoteForm({'message': 'Me caga test@test.com'})
self.assertFalse(form.is_valid())
form = QuoteForm({'message': 'Me caga test.this@test.asdfas.com'})
self.assertFalse(form.is_valid())
def test_validate_short_quote(self):
form = QuoteForm({'message': 'Me caga '})
self.assertFalse(form.is_valid())
def test_validate_long_quote(self):
form = QuoteForm({'message': 'Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar asdfadfa adsfasdfa. Me caga que sea que Este mensaje es demasiado largo y no pase las pruebas de lo que tenemos que probar.'})
self.assertFalse(form.is_valid())
def test_valid_message(self):
form = QuoteForm({'message': 'Me caga probar esto'})
self.assertTrue(form.is_valid())
|
six8/transloader
|
setup.py
|
Python
|
mit
| 1,083
| 0.012927
|
try:
from setuptools import setup
except ImportError:
    from distutils.core import setup
def main():
setup(
        name = 'transloader',
packages=['transloader'],
package_dir = {'transloader':'transloader'},
version = open('VERSION.txt').read().strip(),
author='Mike Thornton',
author_email='six8@devdetails.com',
url='http://github.com/six8/transloader',
download_url='http://github.com/six8/transloader',
keywords=['transloadit'],
license='MIT',
description='A transloadit client',
classifiers = [
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description=open('README.rst').read(),
install_requires = [
'requests'
]
)
if __name__ == '__main__':
main()
|
ActiveState/code
|
recipes/Python/347462_Terminating_subprocess/recipe-347462.py
|
Python
|
mit
| 817
| 0.00612
|
# Create a process that won't end on its own
import subprocess
process = subprocess.Popen(['python.exe', '-c', 'while 1: pass'])
# Kill the process using pywin32
import win32api
win32api.TerminateProcess(int(process._handle), -1)
# Kill the process using ctypes
import ctypes
ctypes.windll.kernel32.TerminateProcess(int(process._handle), -1)
# Kill the process using pywin32 and pid
import win32api
PROCESS_TERMINATE = 1
handle = win32api.OpenProcess(PROCESS_TERMINATE, False, process.pid)
win32api.TerminateProcess(handle, -1)
win32api.CloseHandle(handle)
# Kill the process using ctypes and pid
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, process.pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
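# Note: since Python 2.6 the portable equivalent of all of the above is
# simply process.terminate(), which wraps TerminateProcess() on Windows.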
|
gsehub/edx-platform
|
cms/djangoapps/contentstore/api/views/course_quality.py
|
Python
|
agpl-3.0
| 9,779
| 0.002761
|
# pylint: disable=missing-docstring
import logging
import numpy as np
from scipy import stats
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from edxval.api import get_videos_for_course
from openedx.core.djangoapps.request_cache.middleware import request_cached
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from openedx.core.lib.graph_traversals import traverse_pre_order
from xmodule.modulestore.django import modulestore
from .utils import get_bool_param, course_author_access_required
log = logging.getLogger(__name__)
@view_auth_classes()
class CourseQualityView(DeveloperErrorViewMixin, GenericAPIView):
"""
**Use Case**
**Example Requests**
GET /api/courses/v1/quality/{course_id}/
**GET Parameters**
A GET request may include the following parameters.
* all
* sections
* subsections
* units
* videos
* exclude_graded (boolean) - whether to exclude graded subsections in the subsections and units information.
**GET Response Values**
The HTTP 200 response has the following values.
* is_self_paced - whether the course is self-paced.
* sections
* total_number - number of sections in the course.
* total_visible - number of sections visible to learners in the course.
* number_with_highlights - number of sections that have at least one highlight entered.
* highlights_enabled - whether highlights are enabled in the course.
* subsections
* total_visible - number of subsections visible to learners in the course.
* num_with_one_block_type - number of visible subsections containing only one type of block.
* num_block_types - statistics for number of block types across all visible subsections.
* min
* max
* mean
* median
* mode
* units
* total_visible - number of units visible to learners in the course.
* num_blocks - statistics for number of block across all visible units.
* min
* max
* mean
* median
* mode
* videos
* total_number - number of video blocks in the course.
* num_with_val_id - number of video blocks that include video pipeline IDs.
* num_mobile_encoded - number of videos encoded through the video pipeline.
* durations - statistics for video duration across all videos encoded through the video pipeline.
* min
* max
* mean
* median
* mode
"""
@course_author_access_required
def get(self, request, course_key):
"""
Returns validation information for the given course.
"""
all_requested = get_bool_param(request, 'all', False)
store = modulestore()
with store.bulk_operations(course_key):
course = store.get_course(course_key, depth=self._required_course_depth(request, all_requested))
response = dict(
is_self_paced=course.self_paced,
)
if get_bool_param(request, 'sections', all_requested):
response.update(
sections=self._sections_quality(course)
)
if get_bool_param(request, 'subsections', all_requested):
response.update(
subsections=self._subsections_quality(course, request)
)
if get_bool_param(request, 'units', all_requested):
response.update(
units=self._units_quality(course, request)
)
if get_bool_param(request, 'videos', all_requested):
response.update(
videos=self._videos_quality(course)
)
return Response(response)
def _required_course_depth(self, request, all_requested):
if get_bool_param(request, 'units', all_requested):
# The num_blocks metric for "units" requires retrieving all blocks in the graph.
return None
elif get_bool_param(request, 'subsections', all_requested):
# The num_block_types metric for "subsections" requires retrieving all blocks in the graph.
return None
elif get_bool_param(request, 'sections', all_requested):
return 1
else:
return 0
def _sections_quality(self, course):
sections, visible_sections = self._get_sections(course)
sections_with_highlights = [s for s in visible_sections if s.highlights]
return dict(
total_number=len(sections),
total_visible=len(visible_sections),
number_with_highlights=len(sections_with_highlights),
highlights_enabled=course.highlights_enabled_for_messaging,
)
def _subsections_quality(self, course, request):
subsection_unit_dict = self._get_subsections_and_units(course, request)
num_block_types_per_subsection_dict = {}
for subsection_key, unit_dict in subsection_unit_dict.iteritems():
leaf_block_types_in_subsection = (
unit_info['leaf_block_types']
for unit_info in unit_dict.itervalues()
)
num_block_types_per_subsection_dict[subsection_key] = len(set().union(*leaf_block_types_in_subsection))
return dict(
total_visible=len(num_block_types_per_subsection_dict),
num_with_one_block_type=list(num_block_types_per_subsection_dict.itervalues()).count(1),
num_block_types=self._stats_dict(list(num_block_types_per_subsection_dict.itervalues())),
)
def _units_quality(self, course, request):
subsection_unit_dict = self._get_subsections_and_units(course, request)
num_leaf_blocks_per_unit = [
unit_info['num_leaf_blocks']
for unit_dict in subsection_unit_dict.itervalues()
for unit_info in unit_dict.itervalues()
]
return dict(
total_visible=len(num_leaf_blocks_per_unit),
num_blocks=self._stats_dict(num_leaf_blocks_per_unit),
)
def _videos_quality(self, course):
video_blocks_in_course = modulestore().get_items(course.id, qualifiers={'category': 'video'})
videos_in_val = list(get_videos_for_course(course.id))
video_durations = [video['duration'] for video in videos_in_val]
return dict(
total_number=len(video_blocks_in_course),
num_mobile_encoded=len(videos_in_val),
num_with_val_id=len([v for v in video_blocks_in_course if v.edx_video_id]),
durations=self._stats_dict(video_durations),
)
@request_cached
def _get_subsections_and_units(self, course, request):
"""
Returns {subsection_key: {unit_key: {num_leaf_blocks: <>, leaf_block_types: set(<>) }}}
for all visible subsections and units.
"""
_, visible_sections = self._get_sections(course)
subsection_dict = {}
        for section in visible_sections:
visible_subsections = self._get_visible_children(section)
if get_bool_param(request, 'exclude_graded', False):
visible_subsections = [s for s in visible_subsections if not s.graded]
for subsection in visible_subsections:
unit_dict = {}
                visible_units = self._get_visible_children(subsection)
for unit in visible_units:
leaf_blocks = self._get_leaf_blocks(unit)
unit_dict[unit.location] = dict(
num_leaf_blocks=len(leaf_blocks),
leaf_block_types=set(block.location.block_type for block in leaf_blocks),
)
subsection_dict[subsection.location] = unit_dict
return subsection_dict
@request_cached
def _get_
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractFinebymetranslationsWordpressCom.py
|
Python
|
bsd-3-clause
| 666
| 0.028529
|
def extractFinebymetranslationsWordpressCom(item):
'''
Parser for 'finebymetranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Death Progress Bar', 'Death Progress Bar', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
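# A purely hypothetical feed item this parser would map to a release (the
# real shape of `item` and of extractVolChapterFragmentPostfix is defined
# elsewhere in this codebase):
#   extractFinebymetranslationsWordpressCom(
#       {'title': 'Death Progress Bar c12', 'tags': ['Death Progress Bar']})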
|
pengzhangdev/slackbot
|
slackbot/plugins/component/ttsdriver/iflytek.py
|
Python
|
mit
| 4,519
| 0.005567
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# iflytek.py ---
#
# Filename: iflytek.py
# Description:
# Author: Werther Zhang
# Maintainer:
# Created: Thu Sep 14 09:01:20 2017 (+0800)
#
# Change Log:
#
#
import time
from ctypes import *
from io import BytesIO
import wave
import platform
import logging
import os
import contextlib
logging.basicConfig(level=logging.DEBUG)
BASEPATH=os.path.split(os.path.realpath(__file__))[0]
def not_none_return(obj, defobj):
if obj:
return obj
else:
return defobj
class iflytekTTS():
def __init__(self, appid=None, voice_name=None, speed=None, volume=None, pitch=None):
self.__appid = not_none_return(appid, '59b4d5d4')
self.__voice_name = not_none_return(voice_name, 'xiaowanzi')
self.__speed = not_none_return(speed, 50)
self.__volume = not_none_return(volume, 50)
self.__pitch = not_none_return(pitch, 50)
self.__cur = cdll.LoadLibrary(os.path.join(BASEPATH, 'iflytek/libmsc.so'))
self.__iflytek_init()
def __save_file(self, raw_data, _tmpFile = '/tmp/test.wav'):
if os.path.exists(_tmpFile) :
return
tmpFile = _tmpFile + '.tmp'
with contextlib.closing(wave.open(tmpFile , 'w')) as f:
f.setparams((1, 2, 16000, 262720, 'NONE', 'not compressed'))
f.writeframesraw(raw_data)
os.rename(tmpFile, _tmpFile)
def __iflytek_init(self):
MSPLogin = self.__cur.MSPLogin
ret = MSPLogin(None,None,'appid = {}, work_dir = .'.format(self.__appid))
if ret != 0:
logging.error("MSPLogin failed, error code: {}".format(ret))
return False
return True
def get_tts_audio(self, src_text, filename, language='zh', options=None):
fname = os.path.join('/tmp/', filename + '.' + 'wav')
QTTSSessionBegin = self.__cur.QTTSSessionBegin
QTTSTextPut = self.__cur.QTTSTextPut
QTTSAudioGet = self.__cur.QTTSAudioGet
QTTSAudioGet.restype = c_void_p
QTTSSessionEnd = self.__cur.QTTSSessionEnd
ret_c = c_int(0)
ret = 0
session_begin_params="voice_name = {}, text_encoding = utf8, sample_rate = 16000, speed = {}, volume = {}, pitch = {}, rdn = 2".format(self.__voice_name, self.__speed, self.__volume, self.__pitch)
sessionID = QTTSSessionBegin(session_begin_params, byref(ret_c))
        if ret_c.value == 10111: # engine not initialized yet
if self.__iflytek_init():
return self.get_tts_audio(src_text, filename)
if ret_c.value != 0:
logging.error("QTTSSessionBegin failed, error code: {}".format(ret_c.value))
return
ret = QTTSTextPut(sessionID, src_text, len(src_text), None)
if ret != 0:
logging.error("QTTSTextPut failed, error code:{}".format(ret))
QTTSSessionEnd(sessionID, "TextPutError")
return
logging.info("正在合成 [{}]...".format(src_text))
audio_len = c_uint(0)
synth_status = c_int(0)
f = BytesIO()
while True:
p = QTTSAudioGet(sessionID, byref(audio_len), byref(synth_status), byref(ret_c))
if ret_c.value != 0:
logging.error("QTTSAudioGet failed, error code: {}".format(ret_c))
QTTSSessionEnd(sessionID, "AudioGetError")
break
if p != None:
buf = (c_char * audio_len.value).from_address(p)
f.write(buf)
if synth_status.value == 2:
self.__save_file(f.getvalue(), fname)
break
time.sleep(0.5)
        logging.info('Synthesis finished!')
ret = QTTSSessionEnd(sessionID, "Normal")
if ret != 0:
logging.error("QTTSSessionEnd failed, error code:{}".format(ret))
return ('wav', fname)
if __name__ == '__main__':
tts = iflytekTTS()
def md5sum(contents):
import hashlib
hash = hashlib.md5()
hash.update(contents)
return hash.hexdigest()
import sys
basename = md5sum(sys.argv[1])
t, f = tts.get_tts_audio(sys.argv[1], basename, 'zh');
def mplayer(f):
import commands
st, output = commands.getstatusoutput('mplayer -really-quiet -noconsolecontrols -volume 82 {}'.format(f))
mplayer(f)
import os
print f
basename = md5sum(sys.argv[1][:-1])
t, f = tts.get_tts_audio(sys.argv[1][:-1], basename, 'zh');
print f
#os.remove(f)
|
cuducos/findaconf
|
migrations/env.py
|
Python
|
mit
| 2,277
| 0.000878
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url,
transaction_per_migration=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(connection=connection,
target_metadata=target_metadata,
transaction_per_migration=True)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
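# Typical invocations with the stock Alembic CLI (run from the directory
# holding alembic.ini; the Flask app context must be available for the
# current_app lookups above):
#   alembic upgrade head          # online mode: applies migrations via an Engine
#   alembic upgrade head --sql    # offline mode: emits the SQL to stdout instead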
|
avaika/avaikame
|
project/travel/migrations/0005_auto_20160917_2211.py
|
Python
|
gpl-3.0
| 868
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-17 19:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('travel', '0004_auto_20160319_0055'),
]
operations = [
migrations.RemoveField(
            model_name='post',
name='category',
),
migrations.RemoveField(
model_name='post',
name='post',
),
migrations.RemoveField(
model_name='post',
name='post_en',
),
migrations.RemoveField(
model_name='post',
name='post_ru',
),
migrations.RemoveField(
model_name='tag',
name='category',
),
migrations.DeleteModel(
            name='Category',
),
]
|
georgemarshall/django
|
django/shortcuts.py
|
Python
|
bsd-3-clause
| 4,896
| 0.00143
|
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils.functional import Promise
def render(request, template_name, context=None, content_type=None, status=None, using=None):
"""
Return a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
content = loader.render_to_string(template_name, context, request, using=using)
return HttpResponse(content, content_type, status)
def redirect(to, *args, permanent=False, **kwargs):
"""
Return an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
Issues a temporary redirect by default; pass permanent=True to issue a
permanent redirect.
"""
redirect_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
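# A few typical redirect() calls matching the three argument forms described
# in the docstring above (the names here are illustrative only):
#   return redirect(post)                        # model with get_absolute_url()
#   return redirect('post-detail', pk=post.pk)   # named URL pattern
#   return redirect('/accounts/login/', permanent=True)  # plain URL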
def _get_queryset(klass):
"""
Return a QuerySet or a Manager.
Duck typing in action: any class with a `get()` method (for
get_object_or_404) or a `filter()` method (for get_list_or_404) might do
the job.
"""
# If it is a model class or anything else with ._default_manager
if hasattr(klass, '_default_manager'):
return klass._default_manager.all()
return klass
def get_object_or_404(klass, *args, **kwargs):
"""
Use get() to return an object, or raise a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Like with QuerySet.get(), MultipleObjectsReturned is raised if more than
one object is found.
"""
queryset = _get_queryset(klass)
if not hasattr(queryset, 'get'):
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_object_or_404() must be a Model, Manager, "
"or QuerySet, not '%s'." % klass__name
)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
Use filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
if not hasattr(queryset, 'filter'):
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_list_or_404() must be a Model, Manager, or "
"QuerySet, not '%s'." % klass__name
)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = str(to)
if isinstance(to, str):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return reverse(to, args=args, kwargs=kwargs)
except NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
|
esikachev/scenario
|
sahara/tests/unit/service/test_ops.py
|
Python
|
apache-2.0
| 7,257
| 0
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.plugins import base as base_plugins
from sahara.service import ops
from sahara.tests.unit import base
class FakeCluster(object):
id = 'id'
status = "Some_status"
name = "Fake_cluster"
class FakeNodeGroup(object):
id = 'id'
count = 2
instances = [1, 2]
class FakePlugin(mock.Mock):
node_groups = [FakeNodeGroup()]
def update_infra(self, cluster):
TestOPS.SEQUENCE.append('update_infra')
def configure_cluster(self, cluster):
TestOPS.SEQUENCE.append('configure_cluster')
def start_cluster(self, cluster):
TestOPS.SEQUENCE.append('start_cluster')
def on_terminate_cluster(self, cluster):
TestOPS.SEQUENCE.append('on_terminate_cluster')
def decommission_nodes(self, cluster, instances_to_delete):
TestOPS.SEQUENCE.append('decommission_nodes')
def scale_cluster(self, cluster, node_group_id_map):
TestOPS.SEQUENCE.append('plugin.scale_cluster')
def cluster_destroy(self, ctx, cluster):
TestOPS.SEQUENCE.append('cluster_destroy')
class FakeINFRA(object):
def create_cluster(self, cluster):
TestOPS.SEQUENCE.append('create_cluster')
def scale_cluster(self, cluster, node_group_id_map):
TestOPS.SEQUENCE.append('INFRA.scale_cluster')
return True
def shutdown_cluster(self, cluster):
TestOPS.SEQUENCE.append('shutdown_cluster')
def rollback_cluster(self, cluster, reason):
TestOPS.SEQUENCE.append('rollback_cluster')
class TestOPS(base.SaharaWithDbTestCase):
SEQUENCE = []
@mock.patch('sahara.utils.general.change_cluster_status_description',
return_value=FakeCluster())
@mock.patch('sahara.service.ops._update_sahara_info')
@mock.patch('sahara.service.ops._prepare_provisioning',
return_value=(mock.Mock(), mock.Mock(), FakePlugin()))
@mock.patch('sahara.utils.general.change_cluster_status')
@mock.patch('sahara.conductor.API.cluster_get')
@mock.patch('sahara.service.ops.CONF')
@mock.patch('sahara.service.trusts.create_trust_for_cluster')
@mock.patch('sahara.conductor.API.job_execution_get_all')
@mock.patch('sahara.service.edp.job_manager.run_job')
def test_provision_cluster(self, p_run_job, p_job_exec, p_create_trust,
p_conf, p_cluster_get, p_change_status,
p_prep_provisioning, p_update_sahara_info,
p_change_cluster_status_desc):
del self.SEQUENCE[:]
ops.INFRA = FakeINFRA()
ops._provision_cluster('123')
# checking that order of calls is right
self.assertEqual(['update_infra', 'create_cluster',
'configure_cluster', 'start_cluster'], self.SEQUENCE,
'Order of calls is wrong')
@mock.patch('sahara.service.ops._prepare_provisioning',
return_value=(mock.Mock(), mock.Mock(), FakePlugin()))
@mock.patch('sahara.utils.general.change_cluster_status',
return_value=FakePlugin())
@mock.patch('sahara.utils.general.get_instances')
def test_provision_scaled_cluster(self, p_get_instances, p_change_status,
                                      p_prep_provisioning):
del self.SEQUENCE[:]
ops.INFRA = FakeINFRA()
ops._provision_scaled_cluster('123', {'id': 1})
# checking that order of calls is right
self.assertEqual(['decommission_nodes', 'INFRA.scale_cluster',
'plugin.scale_cluster'], self.SEQUENCE,
'Order of calls is wrong')
@mock.patch('sahara.service.ops.CONF')
@mock.patch('sahara.service.trusts.delete_trust_from_cluster')
@mock.patch('sahara.context.ctx')
def test_terminate_cluster(self, p_ctx, p_delete_trust, p_conf):
del self.SEQUENCE[:]
base_plugins.PLUGINS = FakePlugin()
base_plugins.PLUGINS.get_plugin.return_value = FakePlugin()
ops.INFRA = FakeINFRA()
ops.conductor = FakePlugin()
ops.terminate_cluster('123')
# checking that order of calls is right
self.assertEqual(['on_terminate_cluster', 'shutdown_cluster',
'cluster_destroy'], self.SEQUENCE,
'Order of calls is wrong')
@mock.patch('sahara.utils.general.change_cluster_status_description')
@mock.patch('sahara.service.ops._prepare_provisioning')
@mock.patch('sahara.utils.general.change_cluster_status')
@mock.patch('sahara.service.ops._rollback_cluster')
@mock.patch('sahara.conductor.API.cluster_get')
    def test_ops_error_handler_success_rollback(
self, p_cluster_get, p_rollback_cluster, p_change_cluster_status,
p__prepare_provisioning, p_change_cluster_status_desc):
# Test scenario: failed scaling -> success_rollback
fake_cluster = FakeCluster()
p_change_cluster_status_desc.return_value = FakeCluster()
p_rollback_cluster.return_value = True
p_cluster_get.return_value = fake_cluster
p__prepare_provisioning.side_effect = ValueError('error1')
expected = [
mock.call(fake_cluster, 'Active',
'Scaling cluster failed for the following '
'reason(s): error1')
]
ops._provision_scaled_cluster(fake_cluster.id, {'id': 1})
self.assertEqual(expected, p_change_cluster_status.call_args_list)
@mock.patch('sahara.utils.general.change_cluster_status_description')
@mock.patch('sahara.service.ops._prepare_provisioning')
@mock.patch('sahara.utils.general.change_cluster_status')
@mock.patch('sahara.service.ops._rollback_cluster')
@mock.patch('sahara.conductor.API.cluster_get')
    def test_ops_error_handler_failed_rollback(
self, p_cluster_get, p_rollback_cluster, p_change_cluster_status,
p__prepare_provisioning, p_change_cluster_status_desc):
# Test scenario: failed scaling -> failed_rollback
fake_cluster = FakeCluster()
p_change_cluster_status_desc.return_value = FakeCluster()
p__prepare_provisioning.side_effect = ValueError('error1')
p_rollback_cluster.side_effect = ValueError('error2')
p_cluster_get.return_value = fake_cluster
expected = [
mock.call(
fake_cluster, 'Error', 'Scaling cluster failed for the '
'following reason(s): error1, error2')
]
ops._provision_scaled_cluster(fake_cluster.id, {'id': 1})
self.assertEqual(expected, p_change_cluster_status.call_args_list)
|
managai/myCert
|
vcert/wsgi.py
|
Python
|
mpl-2.0
| 1,416
| 0.000706
|
"""
WSGI config for vcert project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "vcert.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vcert.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
Samsung/ADBI
|
arch/arm/tests/blx_reg_a1.py
|
Python
|
apache-2.0
| 580
| 0.008621
|
import random
from test import *
from branch import *
def test(rm, fn):
name = 'test_blx_reg_a1_%s' % tn()
cleanup = asm_wrap(name, 'r0')
print ' adr %s, %s' % (rm, fn)
if fn.startswith('thumb'):
        print ' orr %s, #1' % (rm)
print '%s_tinsn:' % name
print ' blx %s' % (rm)
cleanup()
def iter_cases():
fun = 'thumb_fun_b thumb_fun_f arm_fun_b arm_fun_f'.split()
while True:
yield random.choice(T32REGS), random.choice(fun)
branch_helpers('b')
print ' .arm'
tests(test, iter_cases(), 30)
branch_helpers('f')
|
airbnb/airflow
|
tests/providers/telegram/hooks/test_telegram.py
|
Python
|
apache-2.0
| 7,973
| 0.003136
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import telegram
import airflow
from airflow.models import Connection
from airflow.providers.telegram.hooks.telegram import TelegramHook
from airflow.utils import db
TELEGRAM_TOKEN = "dummy token"
class TestTelegramHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='telegram-webhook-without-token',
                conn_type='http',
)
)
db.merge_conn(
Connection(
conn_id='telegram_default',
conn_type='http',
password=TELEGRAM_TOKEN,
)
)
db.merge_conn(
Connection(
conn_id='telegram-webhook-with-chat_id',
conn_type='http',
password=TELEGRAM_TOKEN,
host="-420913222",
)
)
def test_should_raise_exception_if_both_connection_or_token_is_not_provided(self):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
TelegramHook()
self.assertEqual("Cannot get token: No valid Telegram connection supplied.", str(e.exception))
def test_should_raise_exception_if_conn_id_doesnt_exist(self):
with self.assertRaises(airflow.exceptions.AirflowNotFoundException) as e:
TelegramHook(telegram_conn_id='telegram-webhook-non-existent')
self.assertEqual("The conn_id `telegram-webhook-non-existent` isn't defined", str(e.exception))
def test_should_raise_exception_if_conn_id_doesnt_contain_token(self):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
TelegramHook(telegram_conn_id='telegram-webhook-without-token')
self.assertEqual("Missing token(password) in Telegram connection", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_chat_id_is_not_provided_anywhere(self, mock_get_conn):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"text": "test telegram message"})
self.assertEqual("'chat_id' must be provided for telegram message", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_message_text_is_not_provided(self, mock_get_conn):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222})
self.assertEqual("'text' must be provided for telegram message", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_all_parameters_are_correctly_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222, "text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_through_constructor(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram_default', chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_in_connection(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_retry_when_any_telegram_error_is_encountered(self, mock_get_conn):
        expected_retry_count = 5
mock_get_conn.return_value = mock.Mock(password="some_token")
def side_effect(*args, **kwargs):
raise telegram.error.TelegramError("cosmic rays caused bit flips")
mock_get_conn.return_value.send_message.side_effect = side_effect
with self.assertRaises(Exception) as e:
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
self.assertTrue("RetryError" in str(e.exception))
self.assertTrue("state=finished raised TelegramError" in str(e.exception))
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
        self.assertEqual(expected_retry_count, mock_get_conn.return_value.send_message.call_count)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_token_is_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(token=TELEGRAM_TOKEN, chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
|
snakeleon/YouCompleteMe-x64
|
third_party/ycmd/third_party/jedi_deps/jedi/test/completion/lambdas.py
|
Python
|
gpl-3.0
| 1,833
| 0.022368
|
# -----------------
# lambdas
# -----------------
a = lambda: 3
#? int()
a()
x = []
a = lambda x: x
#? int()
a(0)
#? float()
(lambda x: x)(3.0)
arg_l = lambda x, y: y, x
#? float()
arg_l[0]('', 1.0)
#? list()
arg_l[1]
arg_l = lambda x, y: (y, x)
args = 1,""
result = arg_l(*args)
#? tuple()
result
#? str()
result[0]
#? int()
result[1]
def with_lambda(callable_lambda, *args, **kwargs):
return callable_lambda(1, *args, **kwargs)
#? int()
with_lambda(arg_l, 1.0)[1]
#? float()
with_lambda(arg_l, 1.0)[0]
#? float()
with_lambda(arg_l, y=1.0)[0]
#? int()
with_lambda(lambda x: x)
#? float()
with_lambda(lambda x, y: y, y=1.0)
arg_func = lambda *args, **kwargs: (args[0], kwargs['a'])
#? int()
arg_func(1, 2, a='', b=10)[0]
#? list()
arg_func(1, 2, a=[], b=10)[1]
# magic method
a = lambda: 3
#? ['__closure__']
a.__closure__
class C():
def __init__(self, foo=1.0):
self.a = lambda: 1
self.foo = foo
def ret(self):
return lambda: self.foo
def with_param(self):
return lambda x: x + self.a()
lambd = lambda self: self.foo
#? int()
C().a()
#? str()
C('foo').ret()()
index = C().with_param()(1)
#? float()
['', 1, 1.0][index]
#? float()
C().lambd()
#? int()
C(1).lambd()
def xy(param):
def ret(a, b):
return a + b
return lambda b: ret(param, b)
#? int()
xy(1)(2)
# -----------------
# lambda param (#379)
# -----------------
class Test(object):
def __init__(self, pred=lambda a, b: a):
self.a = 1
#? int()
self.a
#? float()
pred(1.0, 2)
# -----------------
# test_nocond in grammar (happens in list comprehensions with `if`)
# -----------------
# Doesn't need to do anything yet. It should just not raise an error. These
# nocond lambdas make no sense at all.
#? int()
[a for a in [1,2] if lambda: 3][0]
|
macosforge/ccs-calendarserver
|
txdav/common/datastore/podding/migration/work.py
|
Python
|
apache-2.0
| 5,171
| 0.003674
|
##
# Copyright (c) 2015-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.enterprise.dal.record import fromTable
from twext.enterprise.jobs.workitem import WorkItem
from twisted.internet.defer import inlineCallbacks
from txdav.caldav.datastore.scheduling.imip.token import iMIPTokenRecord
from txdav.caldav.datastore.scheduling.work import allScheduleWork
from txdav.common.datastore.podding.migration.sync_metadata import CalendarMigrationRecord, \
CalendarObjectMigrationRecord, AttachmentMigrationRecord
from txdav.common.datastore.sql_directory import DelegateRecord, \
DelegateGroupsRecord, ExternalDelegateGroupsRecord
from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_DISABLED
class HomeCleanupWork(WorkItem, fromTable(schema.HOME_CLEANUP_WORK)):
"""
    Work item to clean up any previously "external" homes on the pod to which data was migrated. Those
    old homes will now be marked as disabled and need to be silently removed without any side effects
    (i.e., no implicit scheduling, no sharing cancels, etc).
"""
group = "ownerUID"
notBeforeDelay = 300 # 5 minutes
@inlineCallbacks
def doWork(self):
"""
Delete all the corresponding homes.
"""
oldhome = yield self.transaction.calendarHomeWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
if oldhome is not None:
yield oldhome.purgeAll()
oldnotifications = yield self.transaction.notificationsWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
if oldnotifications is not None:
yield oldnotifications.purge()
class MigratedHomeCleanupWork(WorkItem, fromTable(schema.MIGRATED_HOME_CLEANUP_WORK)):
"""
Work item to clean up the old home data left behind after migration, as well
    as other unwanted items like iMIP tokens, delegates, etc. The old homes will
now be marked as disabled and need to be silently removed without any side
effects (i.e., no implicit scheduling, no sharing cancels, etc).
"""
group = "ownerUID"
notBeforeDelay = 300 # 5 minutes
@inlineCallbacks
def doWork(self):
"""
Delete all the corresponding homes, then the ancillary data.
"""
oldhome = yield self.transaction.calendarHomeWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
if oldhome is not None:
# Work items - we need to clean these up before the home goes away because we have an "on delete cascade" on the WorkItem
            # table, and if that ran it would leave orphaned Job rows set to a paused state, and those would remain forever in the table.
for workType in allScheduleWork:
items = yield workType.query(self.transaction, workType.homeResourceID == oldhome.id())
for item in items:
yield item.remove()
yield oldhome.purgeAll()
oldnotifications = yield self.transaction.notificationsWithUID(self.ownerUID, status=_HOME_STATUS_DISABLED)
if oldnotifications is not None:
yield oldnotifications.purge()
# These are things that reference the home id or the user UID but don't get removed via a cascade
# iMIP tokens
cuaddr = "urn:x-uid:{}".format(self.ownerUID)
yield iMIPTokenRecord.deletesome(
self.transaction,
iMIPTokenRecord.organizer == cuaddr,
)
# Delegators - individual and group
yield DelegateRecord.deletesome(self.transaction, DelegateRecord.delegator == self.ownerUID)
yield DelegateGroupsRecord.deletesome(self.transaction, DelegateGroupsRecord.delegator == self.ownerUID)
yield ExternalDelegateGroupsRecord.deletesome(self.transaction, ExternalDelegateGroupsRecord.delegator == self.ownerUID)
class MigrationCleanupWork(WorkItem, fromTable(schema.MIGRATION_CLEANUP_WORK)):
group = "homeResourceID"
notBeforeDelay = 300 # 5 minutes
@inlineCallbacks
def doWork(self):
"""
Delete all the corresponding migration records.
"""
yield CalendarMigrationRecord.deletesome(
self.transaction,
CalendarMigrationRecord.calendarHomeResourceID == self.homeResourceID,
)
yield CalendarObjectMigrationRecord.deletesome(
self.transaction,
CalendarObjectMigrationRecord.calendarHomeResourceID == self.homeResourceID,
)
yield AttachmentMigrationRecord.deletesome(
self.transaction,
AttachmentMigrationRecord.calendarHomeResourceID == self.homeResourceID,
)
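(All three work items above share the same twext jobqueue shape: subclass `WorkItem` together with `fromTable(...)`, declare a `group` key that serializes items for the same owner, set a `notBeforeDelay`, and implement `doWork` as an `inlineCallbacks` coroutine. A minimal, hedged sketch of that pattern follows; `schema.EXAMPLE_CLEANUP_WORK` is a hypothetical table standing in for whichever table backs the new work item:)

```python
from twext.enterprise.dal.record import fromTable
from twext.enterprise.jobs.workitem import WorkItem
from twisted.internet.defer import inlineCallbacks
from txdav.common.datastore.sql_tables import schema, _HOME_STATUS_DISABLED

# Hypothetical work item: EXAMPLE_CLEANUP_WORK is not a real schema table.
class ExampleCleanupWork(WorkItem, fromTable(schema.EXAMPLE_CLEANUP_WORK)):
    group = "ownerUID"       # serialize items sharing the same owner UID
    notBeforeDelay = 300     # wait 5 minutes before the job becomes runnable

    @inlineCallbacks
    def doWork(self):
        # Look up the disabled home for this item's owner and purge it.
        oldhome = yield self.transaction.calendarHomeWithUID(
            self.ownerUID, status=_HOME_STATUS_DISABLED)
        if oldhome is not None:
            yield oldhome.purgeAll()
```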
|
gridengine/config-api
|
test/test_parallel_environment.py
|
Python
|
apache-2.0
| 5,641
| 0.000709
|
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
import tempfile
import types
from .utils import needs_uge
from .utils import generate_random_string
from .utils import create_config_file
from .utils import load_values
from uge.api.qconf_api import QconfApi
from uge.config.config_manager import ConfigManager
from uge.log.log_manager import LogManager
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists
create_config_file()
API = QconfApi()
PE_NAME = '%s.q' % generate_random_string(6)
CONFIG_MANAGER = ConfigManager.get_instance()
LOG_MANAGER = LogManager.get_instance()
VALUES_DICT = load_values('test_values.json')
print(VALUES_DICT)
@needs_uge
def test_object_not_found():
try:
pe = API.get_pe('__non_existent_pe__')
assert (False)
except ObjectNotFound as ex:
# ok
pass
def test_generate_pe():
pe = API.generate_pe(PE_NAME)
assert (pe.data['pe_name'] == PE_NAME)
def test_add_pe():
try:
pel = API.list_pes()
except ObjectNotFound as ex:
# no pes defined
pel = []
pe = API.add_pe(name=PE_NAME)
assert (pe.data['pe_name'] == PE_NAME)
pel2 = API.list_pes()
assert (len(pel2) == len(pel) + 1)
assert (pel2.count(PE_NAME) == 1)
def test_list_pes():
pel = API.list_pes()
assert (pel is not None)
def test_object_already_exists():
try:
pe = API.add_pe(name=PE_NAME)
assert (False)
except ObjectAlreadyExists as ex:
# ok
pass
def test_get_pe():
pe = API.get_pe(PE_NAME)
assert (pe.data['pe_name'] == PE_NAME)
def test_generate_pe_from_json():
pe = API.get_pe(PE_NAME)
json = pe.to_json()
pe2 = API.generate_object(json)
assert (pe2.__class__.__name__ == pe.__class__.__name__)
for key in list(pe.data.keys()):
v = pe.data[key]
v2 = pe2.data[key]
if type(v) == list:
assert (len(v) == len(v2))
for s in v:
assert (v2.count(s) == 1)
elif type(v) == dict:
for key in list(v.keys()):
assert (str(v[key]) == str(v2[key]))
else:
assert (str(v) == str(v2))
def test_modify_pe():
pe = API.get_pe(PE_NAME)
slots = pe.data['slots']
pe = API.modify_pe(name=PE_NAME, data={'slots': slots + 1})
slots2 = pe.data['slots']
assert (slots2 == slots + 1)
def test_get_acls():
    pel = API.list_pes()
pes = API.get_pes()
for pe in pes:
print("#############################################")
print(pe.to_uge())
assert (pe.data['pe_name'] in pel)
def test_write_pes():
try:
tdir = tempfile.mkdtemp()
print("*************************** " + tdir)
pe_names = VALUES_DICT['pe_names']
pes = API.get_pes()
for pe in pes:
print("Before #############################################")
print(pe.to_uge())
new_pes = []
for name in pe_names:
npe = API.generate_pe(name=name)
new_pes.append(npe)
API.mk_pes_dir(tdir)
API.write_pes(new_pes, tdir)
API.add_pes_from_dir(tdir)
API.modify_pes_from_dir(tdir)
pes = API.get_pes()
for pe in pes:
print("After #############################################")
print(pe.to_uge())
pes = API.list_pes()
for name in pe_names:
assert (name in pes)
print("pe found: " + name)
finally:
API.delete_pes_from_dir(tdir)
API.rm_pes_dir(tdir)
def test_add_pes():
try:
new_pes = []
pe_names = VALUES_DICT['pe_names']
for name in pe_names:
npe = API.generate_pe(name=name)
new_pes.append(npe)
# print all pes currently in the cluster
pes = API.get_pes()
for pe in pes:
print("Before #############################################")
print(pe.to_uge())
# add pes
API.add_pes(new_pes)
API.modify_pes(new_pes)
# print all pes currently in the cluster
pes = API.get_pes()
for pe in pes:
print("After #############################################")
print(pe.to_uge())
        # check that pes have been added
pes = API.list_pes()
for name in pe_names:
assert (name in pes)
print("pe found: " + name)
finally:
API.delete_pes(new_pes)
def test_delete_pe():
pel = API.list_pes()
API.delete_pe(PE_NAME)
try:
pel2 = API.list_pes()
except ObjectNotFound as ex:
# no pes defined
pel2 = []
assert (len(pel2) == len(pel) - 1)
assert (pel2.count(PE_NAME) == 0)
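(Taken together, these tests exercise a full CRUD round-trip on parallel environment objects. Condensed into one hedged sketch using the same `QconfApi` calls as above — the PE name is hypothetical:)

```python
from uge.api.qconf_api import QconfApi
from uge.exceptions.object_not_found import ObjectNotFound

api = QconfApi()
name = 'example.q'  # hypothetical PE name

pe = api.add_pe(name=name)                                 # create
assert pe.data['pe_name'] == name
slots = api.get_pe(name).data['slots']                     # read
pe = api.modify_pe(name=name, data={'slots': slots + 1})   # update
assert pe.data['slots'] == slots + 1
api.delete_pe(name)                                        # delete
try:
    remaining = api.list_pes()
except ObjectNotFound:
    remaining = []  # no PEs defined at all, mirroring the tests' guard
assert name not in remaining
```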
|
mohittahiliani/tcp-eval-suite-ns3
|
src/nix-vector-routing/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-2.0
| 373,845
| 0.015033
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.nix_vector_routing', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress', import_from_module='ns.network')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
    module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class]
    module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## node-list.h (module 'network'): ns3::NodeList [class]
module.add_class('NodeList', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
|
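(The truncated binding generator above follows pybindgen's standard registration pattern: install an error handler, create a `Module` for the target C++ namespace, then register each class and enum against it. A stripped-down, hedged sketch of that pattern — the namespace and class names below are hypothetical, not the real ns3 types:)

```python
from pybindgen import Module
import pybindgen.settings
import warnings

class ErrorHandler(pybindgen.settings.ErrorHandler):
    # Report wrapper-generation failures as warnings instead of aborting.
    def handle_error(self, wrapper, exception, traceback_):
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True

pybindgen.settings.error_handler = ErrorHandler()

# Hypothetical namespace and class names; the real script binds ns3 types.
root_module = Module('example.bindings', cpp_namespace='::example')
root_module.add_class('Widget')
root_module.add_enum('Kind_e', ['SMALL', 'LARGE'],
                     outer_class=root_module['example::Widget'])
```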