| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = null) |
|---|---|---|---|---|
| Schevo/schevo | refs/heads/master | schevo/example/moviereviews/schema/__init__.py | 12133432 | |
| Nitaco/ansible | refs/heads/devel | test/units/playbook/__init__.py | 12133432 | |
| apporc/neutron | refs/heads/master | neutron/tests/functional/sanity/__init__.py | 12133432 | |
| robovm/robovm-studio | refs/heads/master | python/testData/refactoring/move/relativeImportsToModulesInSameMovedPackageNotUpdated/after/src/subpkg/subsubpkg/m4.py | 12133432 | |
| ASOdesk/selenium-pytest-fix | refs/heads/pytest-fix | py/test/selenium/webdriver/common/frame_switching_tests.py | 9 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
from http.client import BadStatusLine
except ImportError:
from httplib import BadStatusLine
import random
import pytest
from selenium.common.exceptions import (
NoSuchElementException,
NoSuchFrameException,
TimeoutException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver doesn't do anything fishy when it navigates to a page with frames.
#
# ----------------------------------------------------------------------------------------------
def testShouldAlwaysFocusOnTheTopMostFrameAfterANavigationEvent(driver, pages):
pages.load("frameset.html")
driver.find_element(By.TAG_NAME, "frameset") # Test passes if this does not throw.
def testShouldNotAutomaticallySwitchFocusToAnIFrameWhenAPageContainingThemIsLoaded(driver, pages):
pages.load("iframes.html")
driver.find_element(By.ID, "iframe_page_heading")
def testShouldOpenPageWithBrokenFrameset(driver, pages):
pages.load("framesetPage3.html")
frame1 = driver.find_element(By.ID, "first")
driver.switch_to.frame(frame1)
driver.switch_to.default_content()
frame2 = driver.find_element(By.ID, "second")
driver.switch_to.frame(frame2) # IE9 can not switch to this broken frame - it has no window.
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver can switch to frames as expected.
#
# ----------------------------------------------------------------------------------------------
def testShouldBeAbleToSwitchToAFrameByItsIndex(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(1)
assert driver.find_element(By.ID, "pageNumber").text == "2"
def testShouldBeAbleToSwitchToAnIframeByItsIndex(driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToAFrameByItsName(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("fourth")
assert driver.find_element(By.TAG_NAME, "frame").get_attribute("name") == "child1"
def testShouldBeAbleToSwitchToAnIframeByItsName(driver, pages):
pages.load("iframes.html")
driver.switch_to.frame("iframe1-name")
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToAFrameByItsID(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("fifth")
assert driver.find_element(By.NAME, "windowOne").text == "Open new window"
def testShouldBeAbleToSwitchToAnIframeByItsID(driver, pages):
pages.load("iframes.html")
driver.switch_to.frame("iframe1")
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToFrameWithNameContainingDot(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("sixth.iframe1")
assert "Page number 3" in driver.find_element(By.TAG_NAME, "body").text
def testShouldBeAbleToSwitchToAFrameUsingAPreviouslyLocatedWebElement(driver, pages):
pages.load("frameset.html")
frame = driver.find_element(By.TAG_NAME, "frame")
driver.switch_to.frame(frame)
assert driver.find_element(By.ID, "pageNumber").text == "1"
def testShouldBeAbleToSwitchToAnIFrameUsingAPreviouslyLocatedWebElement(driver, pages):
pages.load("iframes.html")
frame = driver.find_element(By.TAG_NAME, "iframe")
driver.switch_to.frame(frame)
element = driver.find_element(By.NAME, "id-name1")
assert element.get_attribute("value") == "name"
def testShouldEnsureElementIsAFrameBeforeSwitching(driver, pages):
pages.load("frameset.html")
frame = driver.find_element(By.TAG_NAME, "frameset")
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame(frame)
def testFrameSearchesShouldBeRelativeToTheCurrentlySelectedFrame(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("second")
assert driver.find_element(By.ID, "pageNumber").text == "2"
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("third"))
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_name("third"))
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame("second")
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_name("second"))
assert driver.find_element(By.ID, "pageNumber").text == "2"
def testShouldSelectChildFramesByChainedCalls(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.frame(driver.find_element_by_name("child2"))
assert driver.find_element(By.ID, "pageNumber").text == "11"
def testShouldThrowFrameNotFoundExceptionLookingUpSubFramesWithSuperFrameNames(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("second"))
def testShouldThrowAnExceptionWhenAFrameCannotBeFound(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("Nothing here"))
def testShouldThrowAnExceptionWhenAFrameCannotBeFoundByIndex(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame(27)
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrame(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.parent_frame()
driver.switch_to.frame(driver.find_element_by_name("first"))
assert driver.find_element(By.ID, "pageNumber").text == "1"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrameFromASecondLevelFrame(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.frame(driver.find_element_by_name("child1"))
driver.switch_to.parent_frame()
driver.switch_to.frame(driver.find_element_by_name("child2"))
assert driver.find_element(By.ID, "pageNumber").text == "11"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testSwitchingToParentFrameFromDefaultContextIsNoOp(driver, pages):
pages.load("xhtmlTest.html")
driver.switch_to.parent_frame()
assert driver.title == "XHTML Test Page"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFromAnIframe(driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.switch_to.parent_frame()
driver.find_element(By.ID, "iframe_page_heading")
# ----------------------------------------------------------------------------------------------
#
# General frame handling behavior tests
#
# ----------------------------------------------------------------------------------------------
def testShouldContinueToReferToTheSameFrameOnceItHasBeenSelected(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(2)
checkbox = driver.find_element(By.XPATH, "//input[@name='checky']")
checkbox.click()
checkbox.submit()
# TODO(simon): this should not be needed, and is only here because IE's submit returns too
# soon.
WebDriverWait(driver, 3).until(EC.text_to_be_present_in_element((By.XPATH, '//p'), 'Success!'))
@pytest.mark.xfail_marionette(raises=TimeoutException)
def testShouldFocusOnTheReplacementWhenAFrameFollowsALinkToA_TopTargetedPage(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(0)
driver.find_element(By.LINK_TEXT, "top").click()
expectedTitle = "XHTML Test Page"
WebDriverWait(driver, 3).until(EC.title_is(expectedTitle))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "only-exists-on-xhtmltest")))
def testShouldAllowAUserToSwitchFromAnIframeBackToTheMainContentOfThePage(driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.switch_to.default_content()
driver.find_element(By.ID, "iframe_page_heading")
def testShouldAllowTheUserToSwitchToAnIFrameAndRemainFocusedOnIt(driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.find_element(By.ID, "submitButton").click()
assert getTextOfGreetingElement(driver) == "Success!"
def getTextOfGreetingElement(driver):
return WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "greeting"))).text
def testShouldBeAbleToClickInAFrame(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("third")
# This should replace frame "third" ...
driver.find_element(By.ID, "submitButton").click()
# driver should still be focused on frame "third" ...
assert getTextOfGreetingElement(driver) == "Success!"
# Make sure it was really frame "third" which was replaced ...
driver.switch_to.default_content()
driver.switch_to.frame("third")
assert getTextOfGreetingElement(driver) == "Success!"
def testShouldBeAbleToClickInAFrameThatRewritesTopWindowLocation(driver, pages):
pages.load("click_tests/issue5237.html")
driver.switch_to.frame(driver.find_element_by_id("search"))
driver.find_element(By.ID, "submit").click()
driver.switch_to.default_content()
WebDriverWait(driver, 3).until(EC.title_is("Target page for issue 5237"))
def testShouldBeAbleToClickInASubFrame(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_id("sixth"))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
# This should replace frame "iframe1" inside frame "sixth" ...
driver.find_element(By.ID, "submitButton").click()
# driver should still be focused on frame "iframe1" inside frame "sixth" ...
assert getTextOfGreetingElement(driver) == "Success!"
# Make sure it was really frame "iframe1" inside frame "sixth" which was replaced ...
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_id("sixth"))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
assert driver.find_element(By.ID, "greeting").text == "Success!"
def testShouldBeAbleToFindElementsInIframesByXPath(driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
element = driver.find_element(By.XPATH, "//*[@id = 'changeme']")
assert element is not None
@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrl(driver, pages):
pages.load("frameset.html")
assert "frameset.html" in driver.current_url
driver.switch_to.frame(driver.find_element_by_name("second"))
assert "frameset.html" in driver.current_url
@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrlForIframes(driver, pages):
pages.load("iframes.html")
assert "iframes.html" in driver.current_url
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
assert "iframes.html" in driver.current_url
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUs(driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
WebDriverWait(driver, 3).until_not(
EC.presence_of_element_located((By.ID, "iframe1")))
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "iframe1")))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithFrameIndex(driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
iframe = 0
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
# we should be in the frame now
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithWebelement(driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
iframe = driver.find_element(By.ID, "iframe1")
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
# we should be in the frame now
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
iframe = driver.find_element(By.ID, "iframe1")
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_chrome(raises=NoSuchElementException)
@pytest.mark.xfail_marionette(raises=WebDriverException)
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldNotBeAbleToDoAnythingTheFrameIsDeletedFromUnderUs(driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
with pytest.raises(NoSuchFrameException):
driver.find_element(By.ID, "killIframe").click()
def testShouldReturnWindowTitleInAFrameset(driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("third"))
assert "Unique title" == driver.title
def testJavaScriptShouldExecuteInTheContextOfTheCurrentFrame(driver, pages):
pages.load("frameset.html")
assert driver.execute_script("return window == window.top")
driver.switch_to.frame(driver.find_element(By.NAME, "third"))
assert driver.execute_script("return window != window.top")
def testShouldNotSwitchMagicallyToTheTopWindow(driver, pages):
pages.load("frame_switching_tests/bug4876.html")
driver.switch_to.frame(0)
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
for i in range(20):
try:
input = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
submit = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "submitButton")))
input.clear()
input.send_keys("rand%s" % int(random.random()))
submit.click()
finally:
url = driver.execute_script("return window.location.href")
# IE6 and Chrome add "?"-symbol to the end of the URL
if url.endswith("?"):
url = url[:-1]
assert pages.url("frame_switching_tests/bug4876_iframe.html") == url
def testGetShouldSwitchToDefaultContext(driver, pages):
pages.load("iframes.html")
driver.find_element(By.ID, "iframe1")
driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
driver.find_element(By.ID, "cheese") # Found on formPage.html but not on iframes.html.
pages.load("iframes.html") # This must effectively switch_to.default_content(), too.
driver.find_element(By.ID, "iframe1")
|
| leesavide/pythonista-docs | refs/heads/master | Documentation/matplotlib/mpl_examples/pylab_examples/stackplot_demo.py | 6 |
import numpy as np
from matplotlib import pyplot as plt
fnx = lambda: np.random.randint(5, 50, 10)
y = np.row_stack((fnx(), fnx(), fnx()))
x = np.arange(10)
y1, y2, y3 = fnx(), fnx(), fnx()
fig, ax = plt.subplots()
ax.stackplot(x, y)
plt.show()
fig, ax = plt.subplots()
ax.stackplot(x, y1, y2, y3)
plt.show()
|
| kholia/pyrpm | refs/heads/ng | pyrpm/resolver.py | 1 |
#
# Copyright (C) 2004, 2005 Red Hat, Inc.
# Author: Thomas Woerner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
""" The Resolver
...
"""
from stat import S_ISLNK, S_ISDIR
from hashlist import HashList
from functions import *
import base
from logger import log
# ----------------------------------------------------------------------------
class RpmResolver:
"""A Database that handles installs, updates, etc., can check for conflicts
and gather resolvable and unresolvable dependencies.
Allows "direct" access to packages: "for %name in self",
"self[idx] => %name", "self[%name] => RpmPackage", "pkg in self."."""
OK = 1
ALREADY_INSTALLED = -1
OLD_PACKAGE = -2
NOT_INSTALLED = -3
UPDATE_FAILED = -4
ALREADY_ADDED = -5
ARCH_INCOMPAT = -6
OBSOLETE_FAILED = -10
CONFLICT = -11
FILE_CONFLICT = -12
# ----
def __init__(self, config, database, nocheck=0):
"""Initialize, with the "currently installed" packages in RpmPackage
list installed."""
self.config = config
self.database = database
self.clear()
# do no further checks
if nocheck:
return
if config.checkinstalled == 0:
self.installed_unresolved_file_requires = self.getUnresolvedFileRequires()
else:
self.check_installs = set(self.database.getPkgs())
# ----
def clear(self):
"""Clear all changed data."""
self.installs = set() # Added RpmPackage's
self.check_installs = set()
# new RpmPackage
# => ["originally" installed RpmPackage removed by update]
self.updates = { }
self.erases = set() # Removed RpmPackage's
self.check_erases = set()
self.check_file_requires = False
# new RpmPackage =>
# ["originally" installed RpmPackage obsoleted by update]
self.obsoletes = { }
self.installed_unresolved_file_requires = set()
# ----
def install(self, pkg, operation=OP_INSTALL):
"""Add RpmPackage pkg as part of the defined operation.
Return an RpmList error code (after warning the user)."""
# Add RpmPackage
# Return an RpmList error code (after warning the user). Check whether
# a package with the same NEVRA is already in the database.
name = pkg["name"]
if self.database.hasName(name):
for r in self.database.getPkgsByName(name):
ret = self.__install_check(r, pkg)
if ret != self.OK:
return ret
if not self.isInstalled(pkg):
self.installs.add(pkg)
self.erases.discard(pkg)
self.check_installs.add(pkg)
self.database.addPkg(pkg)
return self.OK
# ----
def update(self, pkg):
name = pkg["name"]
# get obsoletes
# Valid only during OP_UPDATE: list of RpmPackage's that will be
# obsoleted by the current update
self.pkg_obsoletes = set()
for u in pkg["obsoletes"]:
s = self.database.searchDependency(u[0], u[1], u[2])
for r in s:
if r["name"] != pkg["name"]:
self.pkg_obsoletes.add(r)
# update package
# Valid only during OP_UPDATE: list of RpmPackage's that will be
# removed by the current update
self.pkg_updates = [ ]
for r in self.database.getPkgsByName(name):
ret = pkgCompare(r, pkg)
if ret > 0: # old_ver > new_ver
if self.config.oldpackage == 0:
if self.isInstalled(r):
msg = "%s: A newer package is already installed"
else:
msg = "%s: A newer package was already added"
log.debug1(msg, pkg.getNEVRA())
del self.pkg_updates
return self.OLD_PACKAGE
else:
# old package: simulate a new package
ret = -1
if ret < 0: # old_ver < new_ver
if self.config.exactarch == 1 and \
self.__arch_incompat(pkg, r):
del self.pkg_updates
return self.ARCH_INCOMPAT
if archDuplicate(pkg["arch"], r["arch"]) or \
pkg["arch"] == "noarch" or r["arch"] == "noarch":
self.pkg_updates.append(r)
else: # ret == 0, old_ver == new_ver
if self.config.exactarch == 1 and \
self.__arch_incompat(pkg, r):
del self.pkg_updates
return self.ARCH_INCOMPAT
ret = self.__install_check(r, pkg) # Fails for same NEVRAs
if ret != self.OK:
del self.pkg_updates
return ret
if archDuplicate(pkg["arch"], r["arch"]):
if archCompat(pkg["arch"], r["arch"]):
if self.isInstalled(r):
msg = "%s: Ignoring due to installed %s"
ret = self.ALREADY_INSTALLED
else:
msg = "%s: Ignoring due to already added %s"
ret = self.ALREADY_ADDED
log.debug1(msg, pkg.getNEVRA(), r.getNEVRA())
del self.pkg_updates
return ret
else:
self.pkg_updates.append(r)
ret = self.install(pkg, operation=OP_UPDATE)
if ret != self.OK:
del self.pkg_updates
return ret
for r in self.pkg_updates:
if self.isInstalled(r):
log.debug1("%s was already installed, replacing with %s",
r.getNEVRA(), pkg.getNEVRA())
else:
log.debug1("%s was already added, replacing with %s",
r.getNEVRA(), pkg.getNEVRA())
if self._pkgUpdate(pkg, r) != self.OK: # Currently can't fail
del self.pkg_updates
return self.UPDATE_FAILED
del self.pkg_updates
# handle obsoletes
for r in self.pkg_obsoletes:
# package is not the same and has not the same name
if self.isInstalled(r):
fmt = "%s obsoletes installed %s, removing %s"
else:
fmt = "%s obsoletes added %s, removing %s"
log.debug1(fmt, pkg.getNEVRA(), r.getNEVRA(), r.getNEVRA())
if self._pkgObsolete(pkg, r) != self.OK:
del self.pkg_obsoletes
return self.OBSOLETE_FAILED
del self.pkg_obsoletes
return self.OK
# ----
def freshen(self, pkg):
"""Add RpmPackage pkg, removing older versions, if a package of the
same %name and base arch is "originally" installed.
Return an RpmList error code."""
found = 0
for r in self.database.getPkgsByName(pkg["name"]):
if r in self.installs: continue
if archDuplicate(pkg["arch"], r["arch"]):
found = 1
break
if not found:
# pkg already got deleted from database
name = pkg["name"]
for r in self.erases:
if (r["name"] == name and
archDuplicate(pkg["arch"], r["arch"])):
found = 1
break
if found == 1:
return self.update(pkg)
return self.NOT_INSTALLED
# ----
def erase(self, pkg):
"""Remove RpmPackage.
Return an RpmList error code (after warning the user)."""
name = pkg["name"]
if not self.database.hasName(name) or \
pkg not in self.database.getPkgsByName(name):
log.warning("Package %s (id %s) not found", pkg.getNEVRA(),
id(pkg))
return self.NOT_INSTALLED
if self.isInstalled(pkg):
self.erases.add(pkg)
self.installs.discard(pkg)
self.check_installs.discard(pkg)
if pkg in self.updates:
del self.updates[pkg]
if pkg in self.obsoletes:
del self.obsoletes[pkg]
self.check_erases.add(pkg)
self.check_file_requires = True
self.database.removePkg(pkg)
return self.OK
# ----
def _checkObsoletes(self, pkg, dep, list, operation=OP_INSTALL):
"""RpmPackage pkg to be newly installed during operation provides dep,
which is obsoleted by RpmPackage's in list.
Filter out irrelevant obsoletes and return 1 if pkg remains obsoleted,
0 otherwise. dep is (name, RPMSENSE_* flag, EVR string) or
(filename, 0, "")."""
ret = 0
conflicts = self._getObsoletes(pkg, dep, list, operation)
for (c,r) in conflicts:
if operation == OP_UPDATE and \
(r in self.pkg_updates or r in self.pkg_obsoletes):
continue
if self.isInstalled(r):
fmt = "%s conflicts with already installed %s on %s, skipping"
else:
fmt = "%s conflicts with already added %s on %s, skipping"
log.warning(fmt, pkg.getNEVRA(), depString(c), r.getNEVRA())
ret = 1
return ret
# ----
def _getObsoletes(self, pkg, dep, list, operation=OP_INSTALL):
"""RpmPackage pkg to be newly installed during operation provides dep,
which is obsoleted by RpmPackage's in list.
Return a pruned list of
((name, RPMSENSE_* flags, EVR string), RpmPackage): handle
config.checkinstalled, always allow updates and multilib packages. dep
is (name, RPMSENSE_* flag, EVR string) or (filename, 0, "")."""
obsoletes = [ ]
if len(list) != 0:
if pkg in list:
del list[pkg]
for r in list:
if operation == OP_UPDATE:
if pkg["name"] == r["name"]:
continue
else:
if pkg.getNEVR() == r.getNEVR():
continue
obsoletes.append((dep, r))
return obsoletes
# ----
def _getConflicts(self, pkg, dep, list):
"""RpmPackage pkg Conflicts: or Obsoletes: (name, RPMSENSE_* flag,
EVR string) dep, with RpmPackage's in list matching that.
Return a pruned list of (dep, matching RpmPackage): handle
config.checkinstalled, always allow updates and multilib packages."""
conflicts = [ ]
if len(list) != 0:
if pkg in list:
list.remove(pkg)
for r in list:
if pkg.getNEVR() == r.getNEVR():
continue
conflicts.append((dep, r))
return conflicts
# ----
def _hasFileConflict(self, pkg1, pkg2, filename):
"""RpmPackage's pkg1 and pkg2 share filename.
Return 1 if the conflict is "real", 0 if it should be ignored.
pkg1_fi is RpmFileInfo of filename in pkg1."""
# pkg1_fi = pkg1.getRpmFileInfo(idx1)
pkg1_fi = pkg1.getRpmFileInfo(filename)
pkg2_fi = pkg2.getRpmFileInfo(filename)
# do not check packages with the same NEVR which are
# not buildarchtranslate compatible
if pkg1.getNEVR() == pkg2.getNEVR() and \
buildarchtranslate[pkg1["arch"]] != \
buildarchtranslate[pkg2["arch"]] and \
pkg1["arch"] != "noarch" and \
pkg2["arch"] != "noarch" and \
pkg1_fi.filecolor != pkg2_fi.filecolor and \
pkg1_fi.filecolor > 0 and pkg2_fi.filecolor > 0:
return 0
# check if data is sufficient
if not pkg1_fi.mode:
raise ValueError, \
"Package '%s': File mode is not set for file '%s'" % \
(pkg1.getNEVRA(), filename)
if not pkg2_fi.mode:
raise ValueError, \
"Package '%s': File mode is not set for file '%s'" % \
(pkg2.getNEVRA(), filename)
# check if user and group are identical
if pkg1_fi.uid != pkg2_fi.uid and \
pkg1_fi.gid != pkg2_fi.gid:
return 1
# ignore directories
if S_ISDIR(pkg1_fi.mode) and S_ISDIR(pkg2_fi.mode):
return 0
# ignore links
if S_ISLNK(pkg1_fi.mode) and S_ISLNK(pkg2_fi.mode) and \
(pkg1_fi.linkto == pkg2_fi.linkto):
return 0
# ignore identical files
if pkg1_fi.mode == pkg2_fi.mode and \
pkg1_fi.filesize == pkg2_fi.filesize and \
pkg1_fi.md5sum == pkg2_fi.md5sum:
return 0
# ignore ghost files
if pkg1_fi.flags & base.RPMFILE_GHOST or \
pkg2_fi.flags & base.RPMFILE_GHOST:
return 0
return 1
# ----
def _pkgObsolete(self, pkg, obsolete_pkg):
"""Remove RpmPackage obsolete_pkg because it will be obsoleted by
RpmPackage pkg.
Return an RpmList error code."""
if self.isInstalled(obsolete_pkg):
# assert obsolete_pkg not in self.obsoletes
self.obsoletes.setdefault(pkg, [ ]).append(obsolete_pkg)
else:
self._inheritUpdates(pkg, obsolete_pkg)
self._inheritObsoletes(pkg, obsolete_pkg)
return self.erase(obsolete_pkg)
# ----
def _pkgUpdate(self, pkg, update_pkg):
"""Remove RpmPackage update_pkg because it will be replaced by
RpmPackage pkg.
Return an RpmList error code."""
if not self.isInstalled(update_pkg):
self._inheritObsoletes(pkg, update_pkg)
if self.isInstalled(update_pkg):
# assert update_pkg not in self.updates
self.updates.setdefault(pkg, [ ]).append(update_pkg)
else:
self._inheritUpdates(pkg, update_pkg)
return self.erase(update_pkg)
# ----
def isInstalled(self, pkg):
"""Return True if RpmPackage pkg is an "originally" installed
package.
Note that having the same NEVRA is not enough, the package should
be from self.names."""
if pkg in self.erases:
return True
if pkg in self.installs:
return False
return pkg in self.database
# ----
def __install_check(self, r, pkg):
"""Check whether RpmPackage pkg can be installed when RpmPackage r
with same %name is already in the current list.
Return an RpmList error code (after warning the user)."""
if r == pkg or r.isEqual(pkg):
if self.isInstalled(r):
log.debug1("%s: %s is already installed", pkg.getNEVRA(),
r.getNEVRA())
return self.ALREADY_INSTALLED
else:
log.debug1("%s: %s was already added", pkg.getNEVRA(),
r.getNEVRA())
return self.ALREADY_ADDED
return self.OK
# ----
def __arch_incompat(self, pkg, r):
"""Return True (and warn) if RpmPackage's pkg and r have different
architectures, but the same base arch.
Warn the user before returning True."""
if pkg["arch"] != r["arch"] and archDuplicate(pkg["arch"], r["arch"]):
log.debug1("%s does not match arch %s.", pkg.getNEVRA(), r["arch"])
return 1
return 0
# ----
def _inheritUpdates(self, pkg, old_pkg):
"""RpmPackage old_pkg will be replaced by RpmPackage pkg; inherit
packages updated by old_pkg."""
if old_pkg in self.updates:
if pkg in self.updates:
self.updates[pkg].extend(self.updates[old_pkg])
normalizeList(self.updates[pkg])
else:
self.updates[pkg] = self.updates[old_pkg]
del self.updates[old_pkg]
# ----
def _inheritObsoletes(self, pkg, old_pkg):
"""RpmPackage old_pkg will be replaced by RpmPackage pkg; inherit
packages obsoleted by old_pkg."""
if old_pkg in self.obsoletes:
if pkg in self.obsoletes:
self.obsoletes[pkg].extend(self.obsoletes[old_pkg])
normalizeList(self.obsoletes[pkg])
else:
self.obsoletes[pkg] = self.obsoletes[old_pkg]
del self.obsoletes[old_pkg]
# ----
def getPkgDependencies(self, pkg):
"""Gather all dependencies of RpmPackage pkg.
Return (unresolved, resolved). "unresolved" is a list of
(name, RPMSENSE_* flag, EVR string); "resolved" is a list of
((name, RPMSENSE_* flag, EVR string),
[relevant resolving RpmPackage's]).
A RpmPackage is ignored (not "relevant") if it is not pkg and pkg
itself fulfills that dependency."""
unresolved = [ ]
resolved = [ ]
# TODO: use db.getRequires()
for u in pkg["requires"]:
if u[0][:7] == "rpmlib(": # drop rpmlib requirements
continue
s = self.database.searchDependency(u[0], u[1], u[2])
# # drop self script prereq and self script postun
# # these prereqs can not be solved by the package itself
# if len(s) > 0 and pkg in s and isLegacyPreReq(u[1]) and \
# (u[1] & RPMSENSE_SCRIPT_PRE != 0 or \
# u[1] & RPMSENSE_SCRIPT_POSTUN != 0):
# if len(s) == 1:
# s = [ ]
# else:
# s.remove(pkg)
if len(s) == 0: # unresolved
unresolved.append(u)
else: # resolved
if pkg in s and len(s) > 1:
s = [pkg]
resolved.append((u, s))
return (unresolved, resolved)
# ----
def getResolvedPkgDependencies(self, pkg):
"""Gather all dependencies of RpmPackage pkg.
Return (unresolved, resolved). "unresolved" is a list of
(name, RPMSENSE_* flag, EVR string); "resolved" is a list of
((name, RPMSENSE_* flag, EVR string),
[relevant resolving RpmPackage's]).
A RpmPackage is ignored (not "relevant") if it is not pkg and pkg
itself fulfills that dependency."""
resolved = [ ]
# TODO: use db.getRequires()
for u in pkg["requires"]:
if u[0][:7] == "rpmlib(": # drop rpmlib requirements
continue
s = self.database.searchDependency(u[0], u[1], u[2])
if len(s) > 0: # resolved
if pkg in s and len(s) > 1:
s = [pkg]
resolved.append((u, s))
return resolved
# ----
def getUnresolvedFileRequires(self, ignore=set()):
db = self.database
filereqs = db.getFileRequires()
normalizeList(filereqs)
if ignore:
filereqs = [f for f in filereqs if f not in ignore]
result = set()
for myfile in filereqs:
if not db.searchDependency(myfile, 0, ""):
result.add(myfile)
return result
# ----
def checkDependencies(self):
"""Check dependencies, report errors.
Return 1 if all dependencies are resolved, 0 if not (after warning the
user)."""
no_unresolved = 1
if self.config.checkinstalled == 0:
unresolved = self.getUnresolvedDependencies()
for p in unresolved.keys():
log.error("%s: unresolved dependencies:", p.getNEVRA())
for u in unresolved[p]:
log.error("\t%s", depString(u))
if unresolved:
return 0
return 1
for name in self.database.getNames():
for r in self.database.getPkgsByName(name):
if self.config.checkinstalled == 0 and \
len(self.erases) == 0 and self.isInstalled(r):
# do not check installed packages if no packages
# are getting removed (by erase, update or obsolete)
continue
log.debug1("Checking dependencies for %s", r.getNEVRA())
(unresolved, resolved) = self.getPkgDependencies(r)
if len(resolved) > 0:
log.debug2("%s: resolved dependencies:", r.getNEVRA())
for (u, s) in resolved:
s2 = ""
for r2 in s:
s2 += "%s " % r2.getNEVRA()
log.debug2("\t%s: %s", depString(u), s2)
if len(unresolved) > 0:
no_unresolved = 0
log.error("%s: unresolved dependencies:", r.getNEVRA())
for u in unresolved:
log.error("\t%s", depString(u))
return no_unresolved
# ----
def getResolvedDependencies(self):
"""Get resolved dependencies.
Return a HashList: RpmPackage =>
[((name, RPMSENSE_* flags, EVR string),
[relevant resolving RpmPackage's])]."""
all_resolved = HashList()
for name in self.database.getNames():
for r in self.database.getPkgsByName(name):
log.debug1("Checking dependencies for %s", r.getNEVRA())
(unresolved, resolved) = self.getPkgDependencies(r)
if len(resolved) > 0:
all_resolved.setdefault(r, [ ]).extend(resolved)
return all_resolved
# ----
def getUnresolvedDependencies(self):
"""Get all unresolved dependencies.
Return a HashList: RpmPackage =>
[(name, RPMSENSE_* flags, EVR string)]."""
unresolved = HashList()
for p, d in self.iterUnresolvedDependencies():
unresolved.setdefault(p, [ ]).append(d)
return unresolved
# ----
def iterUnresolvedDependencies(self):
"""only check changes done to the database"""
for pkg in list(self.check_erases):
# check if provides are required and not provided by another
# package
ok = True
for dep in pkg["provides"]:
sr = self.database.searchRequires(dep[0], dep[1], dep[2])
for p in sr:
for d in sr[p]:
sp = self.database.searchProvides(d[0], d[1], d[2])
if len(sp) > 0:
continue
ok = False
yield p, d
if ok and pkg in self.check_erases:
self.check_erases.remove(pkg)
# check new packages
for pkg in list(self.check_installs):
ok = True
for u in pkg["requires"]:
if u[0][:7] == "rpmlib(": # drop rpmlib requirements
continue
s = self.database.searchDependency(u[0], u[1], u[2])
if len(s) > 0: # found something
continue
ok = False
yield pkg, u
if ok and pkg in self.check_installs:
self.check_installs.remove(pkg)
if self.check_file_requires:
ok = True
# check if filenames are required and not provided by another
# package
unresolved = self.getUnresolvedFileRequires(
self.installed_unresolved_file_requires)
for f in unresolved:
sr = self.database.searchRequires(f, 0, "")
for p, r in sr.iteritems():
for dep in r:
ok = False
yield p, dep
self.check_file_requires = not ok or bool(self.check_erases)
def getPkgConflicts(self, pkg, deps, dest):
"""Check for conflicts to pkg's deps, add results to dest[pkg].
dest[pkg] will be
[((name, RPMSENSE_* flags, EVR string), conflicting RpmPackage)]."""
for c in deps:
s = self.database.searchDependency(c[0], c[1], c[2])
pruned = self._getConflicts(pkg, c, s)
for c in pruned:
if pkg not in dest:
dest[pkg] = [ ]
if c not in dest[pkg]:
dest[pkg].append(c)
# ----
def getConflicts(self):
"""Check for conflicts in conflicts and obsoletes among currently
installed packages.
Return a HashList: RpmPackage =>
[((name, RPMSENSE_* flags, EVR string), conflicting RpmPackage)]."""
conflicts = HashList()
if self.config.noconflicts:
# conflicts turned off
return conflicts
if self.config.checkinstalled == 0:
for r in self.installs:
log.debug1("Checking for conflicts for %s", r.getNEVRA())
self.getPkgConflicts(r, r["conflicts"] + r["obsoletes"],
conflicts)
return conflicts
for name in self.database.getNames():
for r in self.database.getPkgsByName(name):
log.debug1( "Checking for conflicts for %s", r.getNEVRA())
self.getPkgConflicts(r, r["conflicts"] + r["obsoletes"],
conflicts)
return conflicts
def getObsoleteConflicts(self):
"""Check for conflicts from obsoletes of installed packages
against newly installed packages.
Return a HashList: RpmPackage =>
[((name, RPMSENSE_* flags, EVR string), conflicting RpmPackage)]."""
conflicts = HashList()
if self.config.checkinstalled:
# conflicts get caught by getConflicts()
return conflicts
for n, f, v, pkg in self.database.iterObsoletes():
if pkg in self.installs:
continue
for p in self.database.searchDependency(n, f, v):
if not p in self.installs:
continue
# if p in self.updates:
# XXX check old pkg has already same conflict
conflicts.setdefault(pkg, []).append(((n, f, v), p))
return conflicts
# ----
def getObsoletes(self):
"""Check for obsoletes among packages in self.database.names.
Return a HashList: RpmPackage =>
[((name, RPMSENSE_* flags, EVR string), obsoleted RpmPackage)]."""
obsoletes = HashList()
if self.config.noconflicts:
# conflicts turned off, obsoletes are also conflicts, but in an
# other level
return obsoletes
if self.config.checkinstalled == 0:
for r in self.installs:
log.debug1("Checking for obsoletes for %s", r.getNEVRA())
self.getPkgConflicts(r, r["obsoletes"], obsoletes)
return obsoletes
for name in self.database.getNames():
for r in self.database.getPkgsByName(name):
log.debug1("Checking for obsoletes for %s", r.getNEVRA())
self.getPkgConflicts(r, r["obsoletes"], obsoletes)
return obsoletes
# ----
def checkConflicts(self):
"""Check for package conflicts, report errors.
Return 1 if OK, 0 if there are conflicts (after warning the user)."""
result = 1
for conflicts in self.getConflicts(), self.getObsoleteConflicts():
if len(conflicts) == 0:
continue
result = 0
for pkg in conflicts:
conf = { }
for c,r in conflicts[pkg]:
if not r in conf:
conf[r] = [ ]
if not c in conf[r]:
conf[r].append(c)
for r in conf.keys():
log.error("%s conflicts with %s on:", pkg.getNEVRA(),
r.getNEVRA())
for c in conf[r]:
log.error("\t%s", depString(c))
return result
# ----
def getFileConflicts(self):
"""Find file conflicts among packages in self.database.names.
Return a HashList:
RpmPackage => [(filename, conflicting RpmPackage)]."""
conflicts = HashList()
db = self.database
if self.config.nofileconflicts:
# file conflicts turned off
return conflicts
if self.config.checkinstalled == 0:
# no conflicts if there is no new package
for pkg in self.installs:
for name in pkg.iterFilenames():
dups = db.searchFilenames(name)
if len(dups) == 1: continue
log.debug1("Checking for file conflicts for '%s'", name)
for p in dups:
if p is pkg: continue
if self._hasFileConflict(pkg, p, name):
conflicts.setdefault(pkg, [ ]).append(
(name, p))
return conflicts
# duplicates: { name: [(pkg, idx),..], .. }
duplicates = self.database.getFileDuplicates()
for name in duplicates:
dups = duplicates[name]
log.debug1("Checking for file conflicts for '%s'", name)
for j in xrange(len(dups)):
for k in xrange(j+1, len(dups)):
if not self._hasFileConflict(dups[j], dups[k], name):
continue
conflicts.setdefault(dups[j], [ ]).append((name, dups[k]))
conflicts.setdefault(dups[k], [ ]).append((name, dups[j]))
return conflicts
# ----
def checkFileConflicts(self):
"""Check file conflicts, report errors.
Return 1 if OK, 0 if there are file conflicts (after warning the
user)."""
conflicts = self.getFileConflicts()
if len(conflicts) == 0:
return 1
for pkg in conflicts:
conf = { }
for f,r in conflicts[pkg]:
if not r in conf:
conf[r] = [ ]
if not f in conf[r]:
conf[r].append(f)
for r in conf.keys():
log.error("%s file conflicts with %s on:",
pkg.getNEVRA(), r.getNEVRA())
for f in conf[r]:
log.error("\t%s", f)
return 0
# ----
def getDatabase(self):
return self.database
# ----
def resolve(self):
"""Check dependencies and conflicts.
Return 1 if everything is OK, a negative number if not (after warning
the user)."""
# checking dependencies
if self.checkDependencies() != 1:
return -1
if self.config.noconflicts == 0:
# check for conflicts
if self.checkConflicts() != 1:
return -2
if self.config.nofileconflicts == 0:
# check for file conflicts
if self.checkFileConflicts() != 1:
return -3
return 1
# vim:ts=4:sw=4:showmatch:expandtab
|
| dymkowsk/mantid | refs/heads/master | scripts/Inelastic/dos/load_castep.py | 3 |
#pylint: disable=redefined-builtin
from __future__ import (absolute_import, division, print_function)
from six.moves import range
import re
import numpy as np
import dos.load_helper as load_helper
def parse_castep_file(file_name, ir_or_raman):
"""
Read frequencies from a <>.castep file
@param file_name - file path of the file to read
@param ir_or_raman - when True, zero out intensities flagged as not active ('N')
@return the frequencies, infra red and raman intensities and weights of frequency blocks
"""
file_data = {}
# Get Regex strings from load_helper
header_regex = re.compile(load_helper.CASTEP_HEADER_REGEX)
data_regex = re.compile(load_helper.CASTEP_DATA_REGEX)
bond_regex = re.compile(load_helper.CASTEP_BOND_REGEX)
block_count = 0
frequencies, ir_intensities, raman_intensities, weights, q_vectors, bonds = [], [], [], [], [], []
data_lists = (frequencies, ir_intensities, raman_intensities)
with open(file_name, 'rU') as f_handle:
file_data.update(_parse_castep_file_header(f_handle))
while True:
line = f_handle.readline()
# Check we've reached the end of file
if not line:
break
# Check if we've found a block of frequencies
header_match = header_regex.match(line)
if header_match:
block_count += 1
weight, q_vector = load_helper._parse_block_header(header_match, block_count)
weights.append(weight)
q_vectors.append(q_vector)
# Move file pointer forward to start of intensity data
_find_castep_freq_block(f_handle, data_regex)
# Parse block of frequencies
for line_data in _parse_castep_freq_block(f_handle, file_data['num_branches'], ir_or_raman):
for data_list, item in zip(data_lists, line_data):
data_list.append(item)
# Check if we've found a bond
bond_match = bond_regex.match(line)
if bond_match:
bonds.append(_parse_castep_bond(bond_match))
frequencies = np.asarray(frequencies)
ir_intensities = np.asarray(ir_intensities)
raman_intensities = np.asarray(raman_intensities)
warray = np.repeat(weights, file_data['num_branches'])
file_data.update({
'frequencies': frequencies,
'ir_intensities': ir_intensities,
'raman_intensities': raman_intensities,
'weights': warray,
'q_vectors':q_vectors
})
if len(bonds) > 0:
file_data['bonds'] = bonds
return file_data
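# A hedged usage sketch (illustration only; the file name is hypothetical):
# data = parse_castep_file('sample.castep', ir_or_raman=True)
# print(data['num_branches'], data['frequencies'].shape, data['weights'].shape)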
#----------------------------------------------------------------------------------------
def _parse_castep_file_header(f_handle):
"""
Read information from the header of a <>.castep file
@param f_handle - handle to the file.
@return dict of file data, including the number of ions and branches in the file
"""
num_species = 0
file_data = {}
while True:
line = f_handle.readline()
if not line:
raise IOError("Could not find any header information.")
if 'Total number of ions in cell =' in line:
file_data['num_ions'] = int(line.strip().split()[-1])
elif 'Total number of species in cell = ' in line:
num_species = int(line.strip().split()[-1])
if num_species > 0 and file_data['num_ions'] > 0:
file_data['num_branches'] = num_species * file_data['num_ions']
return file_data
#----------------------------------------------------------------------------------------
def _parse_castep_freq_block(f_handle, num_branches, ir_or_raman):
"""
Iterator to parse a block of frequencies from a .castep file.
@param f_handle - handle to the file.
"""
for _ in range(num_branches):
line = f_handle.readline()
line_data = line.strip().split()[1:-1]
freq = line_data[1]
intensity_data = line_data[3:]
# Remove non-active intensities from data
intensities = []
for value, active in zip(intensity_data[::2], intensity_data[1::2]):
if ir_or_raman:
if active == 'N' and value != 0:
value = 0.0
intensities.append(value)
line_data = [freq] + intensities
line_data = [float(x) for x in line_data]
yield line_data
#----------------------------------------------------------------------------------------
def _find_castep_freq_block(f_handle, data_regex):
"""
Find the start of the frequency block in a .castep file.
This will set the file pointer to the line before the start
of the block.
@param f_handle - handle to the file.
"""
while True:
pos = f_handle.tell()
line = f_handle.readline()
if not line:
raise IOError("Could not parse frequency block. Invalid file format.")
if data_regex.match(line):
f_handle.seek(pos)
return
#----------------------------------------------------------------------------------------
def _parse_castep_bond(bond_match):
"""
Parses a regex match to obtain bond information.
@param bond_match Regex match to bond data line
@return A dictionary defining the bond
"""
bond = dict()
bond['atom_a'] = (bond_match.group(1), int(bond_match.group(2)))
bond['atom_b'] = (bond_match.group(3), int(bond_match.group(4)))
bond['population'] = float(bond_match.group(5))
bond['length'] = float(bond_match.group(6))
return bond
#----------------------------------------------------------------------------------------
|
| weleen/mxnet | refs/heads/master | python/mxnet/contrib/ndarray.py | 54 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""NDArray namespace used to register contrib functions"""
__all__ = []
|
| infoxchange/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/utils/unittest/__main__.py | 572 |
"""Main entry point"""
import sys
if sys.argv[0].endswith("__main__.py"):
sys.argv[0] = "unittest2"
__unittest = True
from django.utils.unittest.main import main_
main_()
|
| liangwang/m5 | refs/heads/master | src/arch/x86/isa/insts/x87/control/save_x87_status_word.py | 19 |
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# FSTSW
# FNSTSW
'''
|
| djabber/Dashboard | refs/heads/master | bottle/dash/lib/python2.7/encodings/utf_16.py | 404 |
""" Python 'utf-16' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_16_encode
def decode(input, errors='strict'):
return codecs.utf_16_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
def encode(self, input, final=False):
if self.encoder is None:
result = codecs.utf_16_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
def getstate(self):
# state info we return to the caller:
# 0: stream is in natural order for this platform
# 2: endianness hasn't been determined yet
# (we're never writing in unnatural order)
return (2 if self.encoder is None else 0)
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_16_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_16_be_decode
elif consumed >= 2:
raise UnicodeError("UTF-16 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
codecs.StreamWriter.__init__(self, stream, errors)
self.encoder = None
def reset(self):
codecs.StreamWriter.reset(self)
self.encoder = None
def encode(self, input, errors='strict'):
if self.encoder is None:
result = codecs.utf_16_encode(input, errors)
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
else:
return self.encoder(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_16_le_decode
elif byteorder == 1:
self.decode = codecs.utf_16_be_decode
elif consumed >= 2:
raise UnicodeError("UTF-16 stream does not start with BOM")
return (object, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
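# A hedged usage sketch (illustration only): the module-level helpers
# round-trip text through UTF-16 with a BOM in the platform's native byte order.
# raw, length = encode(u"hi")
# assert decode(raw)[0] == u"hi"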
|
| romain-li/edx-platform | refs/heads/master | lms/djangoapps/certificates/__init__.py | 29 |
""" Certificates app """
# this is here to support registering the signals in signals.py
from . import signals
|
| mistio/libcloud | refs/heads/trunk | docs/examples/dns/durabledns/instantiate_driver.py | 35 |
from libcloud.dns.types import Provider
from libcloud.dns.providers import get_driver
cls = get_driver(Provider.DURABLEDNS)
driver = cls('api_user', 'api_key')
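# A hedged follow-up (not part of the original docs example): once
# instantiated, the driver exposes the standard libcloud DNS API, e.g.:
# zones = driver.list_zones()
# print([zone.domain for zone in zones])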
|
| stclair/wes-cms | refs/heads/master | filetransfers/templatetags/filetransfers.py | 29 |
from django.template import Library
from django.utils.safestring import mark_safe
from ..api import public_download_url
register = Library()
_hidden_data_field = '<input type="hidden" name="%s" value="%s" />'
@register.simple_tag
def render_upload_data(data):
inputs = ''.join(_hidden_data_field % item for item in data.items())
if inputs:
return mark_safe('<div style="display:none">%s</div>' % inputs)
return ''
register.filter(public_download_url)
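# A hedged usage note (illustration only; upload_data and file are
# hypothetical template context variables): in a template,
# {% load filetransfers %}
# {% render_upload_data upload_data %}
# {{ file|public_download_url }}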
|
| xpansa/server-tools | refs/heads/8.0 | database_cleanup/model/purge_tables.py | 44 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class CleanupPurgeLineTable(orm.TransientModel):
_inherit = 'cleanup.purge.line'
_name = 'cleanup.purge.line.table'
_columns = {
'wizard_id': fields.many2one(
'cleanup.purge.wizard.table', 'Purge Wizard', readonly=True),
}
def purge(self, cr, uid, ids, context=None):
"""
Unlink tables upon manual confirmation.
"""
lines = self.browse(cr, uid, ids, context=context)
tables = [line.name for line in lines]
for line in lines:
if line.purged:
continue
# Retrieve constraints on the tables to be dropped
# This query is referenced in numerous places
# on the Internet but credits probably go to Tom Lane
# in this post http://www.postgresql.org/\
# message-id/22895.1226088573@sss.pgh.pa.us
# Only using the constraint name and the source table,
# but I'm leaving the rest in for easier debugging
cr.execute(
"""
SELECT conname, confrelid::regclass, af.attname AS fcol,
conrelid::regclass, a.attname AS col
FROM pg_attribute af, pg_attribute a,
(SELECT conname, conrelid, confrelid,conkey[i] AS conkey,
confkey[i] AS confkey
FROM (select conname, conrelid, confrelid, conkey,
confkey, generate_series(1,array_upper(conkey,1)) AS i
FROM pg_constraint WHERE contype = 'f') ss) ss2
WHERE af.attnum = confkey AND af.attrelid = confrelid AND
a.attnum = conkey AND a.attrelid = conrelid
AND confrelid::regclass = '%s'::regclass;
""" % line.name)
for constraint in cr.fetchall():
if constraint[3] in tables:
self.logger.info(
'Dropping constraint %s on table %s (to be dropped)',
constraint[0], constraint[3])
cr.execute(
"ALTER TABLE %s DROP CONSTRAINT %s" % (
constraint[3], constraint[0]))
self.logger.info(
'Dropping table %s', line.name)
cr.execute("DROP TABLE \"%s\"" % (line.name,))
line.write({'purged': True})
cr.commit()
return True
class CleanupPurgeWizardTable(orm.TransientModel):
_inherit = 'cleanup.purge.wizard'
_name = 'cleanup.purge.wizard.table'
def default_get(self, cr, uid, fields, context=None):
res = super(CleanupPurgeWizardTable, self).default_get(
cr, uid, fields, context=context)
if 'name' in fields:
res['name'] = _('Purge tables')
return res
def find(self, cr, uid, context=None):
"""
Search for tables that cannot be instantiated.
Ignore views for now.
"""
model_ids = self.pool['ir.model'].search(cr, uid, [], context=context)
# Start out with known tables with no model
known_tables = ['wkf_witm_trans']
for model in self.pool['ir.model'].browse(
cr, uid, model_ids, context=context):
model_pool = self.pool.get(model.model)
if not model_pool:
continue
known_tables.append(model_pool._table)
known_tables += [
column._sql_names(model_pool)[0]
for column in model_pool._columns.values()
if (column._type == 'many2many' and
hasattr(column, '_rel')) # unstored function fields of
# type m2m don't have _rel
]
# Cannot pass table names as a psycopg argument
known_tables_repr = ",".join(
[("'%s'" % table) for table in known_tables])
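        # Illustrative example: ['res_partner', 'wkf_witm_trans'] becomes
        # "'res_partner','wkf_witm_trans'" for the IN clause below.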
cr.execute(
"""
SELECT table_name FROM information_schema.tables
WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
AND table_name NOT IN (%s)""" % known_tables_repr)
res = [(0, 0, {'name': row[0]}) for row in cr.fetchall()]
if not res:
raise orm.except_orm(
_('Nothing to do'),
_('No orphaned tables found'))
return res
_columns = {
'purge_line_ids': fields.one2many(
'cleanup.purge.line.table',
'wizard_id', 'Tables to purge'),
}
|
tinyHui/SearchEngine
|
refs/heads/master
|
app/SearchEngine/urls.py
|
2
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'Pages.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
thilbern/scikit-learn
|
refs/heads/master
|
sklearn/feature_selection/rfe.py
|
3
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from .base import SelectorMixin
from ..metrics.scorer import check_scoring
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
    features and weights are assigned to each one of them. Then, the
    features whose absolute weights are the smallest are pruned from the
    current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]` corresponds to the \
ranking position of the i-th feature. Selected (i.e., estimated \
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params={}, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
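        # Illustrative example: with n_features = 10 and step = 0.2, two
        # features are removed per iteration; an integer step such as 3
        # removes three at a time (but never more than needed to reach
        # n_features_to_select, see the threshold below).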
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
if estimator.coef_.ndim > 1:
ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(estimator.coef_))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, support_], y)
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
`grid_scores_[i]` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    in the Friedman #1 dataset, without knowing their number a priori.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params={}, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
rfe = RFE(estimator=self.estimator, n_features_to_select=1,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
scores = np.zeros(X.shape[1])
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
# Compute a full ranking of the features
ranking_ = rfe.fit(X_train, y_train).ranking_
# Score each subset of features
for k in range(0, max(ranking_)):
mask = np.where(ranking_ <= k + 1)[0]
estimator = clone(self.estimator)
estimator.fit(X_train[:, mask], y_train)
score = _score(estimator, X_test[:, mask], y_test, scorer)
if self.verbose > 0:
print("Finished fold with %d / %d feature ranks, score=%f"
% (k + 1, max(ranking_), score))
scores[k] += score
# Pick the best number of features on average
k = np.argmax(scores)
best_score = scores[k]
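        # With step=1, scores[k] accumulates the CV score obtained using the
        # (k + 1) top-ranked features, which is why k + 1 is used as the
        # number of features to select below.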
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=k+1,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # The scores were accumulated over len(cv) folds in the loop above,
        # so normalize them by the number of folds.
self.grid_scores_ = scores / len(cv)
return self
|
alexandershuping/cabbage-bot
|
refs/heads/dev
|
util/StarFramework.py
|
1
|
import discord
import cabbagerc as rc
from discord.ext import commands
from util.FlagFramework import FlagFramework
from util.Logger import Logger
from sql.cabbagebase import CabbageBase
from datetime import datetime
class StarThresholdError(Exception):
    ''' Raised when a caller attempts to set the starboard threshold (the
        number of stars required for the bot to post the message on the
        starboard) to an invalid value.
    '''
class Starboard:
''' Class representing a server's Starboard '''
def __init__(self, server, bot):
self.log = Logger()
self.base = CabbageBase()
self.flags = FlagFramework()
self.server = server
self.bot = bot
self.sbChanid = None
self.sbChannel = None
self.thresh = None
if self.flags.hasFlag('channel', 'star', self.server.id):
self.sbChanid = self.flags.getFlag('channel', 'star', self.server.id)[0]['flag']
self.sbChannel = self.server.get_channel(self.sbChanid)
if self.flags.hasFlag('thresh', 'star', self.server.id):
self.thresh = self.flags.getFlag('thresh', 'star', self.server.id)[0]['flag']
def isWorking(self):
''' Returns true if the server's starboard has been set up and is
valid.
'''
return self.sbChannel and self.thresh and self.thresh > 0
def reset(self, channel, thresh):
''' Resets the starboard parameters to the provided ones.
'''
self.sbChannel = channel
if thresh > 0:
self.thresh = thresh
else:
raise StarThresholdError
self.log.log('Starboard for server ' + self.server.id + ' updated: parameters changed', 'star', 7)
self._update()
def _update(self):
''' Updates the server flags and database entries for the starboard.
Note that this does NOT check any messages for new stars or
update starboard messages.
'''
if self.thresh <= 0:
raise StarThresholdError
self.flags.tset('channel', 'star', self.server.id, self.sbChannel.id)
self.flags.iset('thresh', 'star', self.server.id, self.thresh)
        self.sbChanid = self.sbChannel.id
def getStarboardMessage(self, chanid, messid):
''' Return a StarMessage object for the provided message.
'''
q = self.base.query('starboard', ('original_message_channel','starboard_message_messid'), (('server',int(self.server.id)),('original_message_channel',int(chanid)),('original_message_messid',int(messid))))
if q and len(q) > 0:
sm = StarMessage(self.server.id, q[0][0], messid, q[0][1])
else:
sm = StarMessage(self.server.id, chanid, messid)
return sm
def _determineAppropriateStarEmoji(self, numStars):
''' Determines the appropriate star emoji
'''
if numStars < self.thresh:
return '⚫'
elif numStars < (1.5 * self.thresh):
return '⭐'
elif numStars < (2 * self.thresh):
return '✴'
elif numStars < (3 * self.thresh):
return '🌠'
else:
return '🌌'
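    # Illustrative example: with thresh = 4, a message shows '⚫' for 0-3
    # stars, '⭐' for 4-5, '✴' for 6-7, '🌠' for 8-11 and '🌌' for 12 or more.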
async def _postOrUpdateStarboardMessage(self, msg, channel):
''' Posts a message to the starboard, or (if it is already there)
updates it to reflect changes in star totals.
'''
srv = self.bot.get_server(msg.server)
cha = channel
mes = await self.bot.get_message(cha, msg.messid)
sbMessid = None
if msg.starboardMessid:
sbMessid = msg.starboardMessid
else:
# The message indicates that it is not yet on the starboard, but
# check anyway.
q = self.base.query('starboard', ('starboard_message_messid',), (('server',int(self.server.id)),('original_message_channel',int(self.sbChanid)),('original_message_messid',int(msg.messid))))
if q and len(q) > 0:
# It was actually on the starboard.
sbMessid = q[0][0]
newEmbed = msg.constructEmbed(mes)
numStars = msg.getStars()
header = '**' + self._determineAppropriateStarEmoji(numStars) + str(numStars) + ' ' + cha.mention + '**'
if sbMessid:
sbMessage = await self.bot.get_message(self.sbChannel, sbMessid)
await self.bot.edit_message(sbMessage, header, embed=newEmbed)
else:
newSbMes = await self.bot.send_message(self.sbChannel, header, embed=newEmbed)
cmd = 'INSERT INTO starboard (server, starboard, starboard_message_messid, original_message_channel, original_message_messid, original_message_sent) VALUES (%s,%s,%s,%s,%s,%s)'
cur = self.base.getCursor()
cur.execute(cmd, (int(self.server.id), int(self.sbChanid), int(newSbMes.id), int(mes.channel.id), int(mes.id), mes.timestamp))
self.base.commit()
cur.close()
class StarMessage:
''' Class representing a message with stars '''
def __init__(self, server, chanid, messid, starboardMessid=None):
self.base = CabbageBase()
self.server = server
self.chanid = chanid
self.messid = messid
self.starboardMessid = starboardMessid
def star(self, uid):
''' Adds a star to a message, as long as it has not been starred by
the same user before.
'''
if not self.hasStarred(uid):
cur = self.base.getCursor()
insertString = 'INSERT INTO stars (server,chanid,messid,starrer) VALUES (%s,%s,%s,%s);'
cur.execute(insertString, (self.server, self.chanid, self.messid, uid))
self.base.commit()
cur.close()
def unstar(self, uid):
''' Removes a star from a message.
'''
        if self.hasStarred(uid):
cur = self.base.getCursor()
delString = 'DELETE FROM ONLY stars WHERE server=%s AND chanid=%s AND messid=%s AND starrer=%s'
cur.execute(delString, (self.server, self.chanid, self.messid, uid))
self.base.commit()
cur.close()
def getStars(self):
''' Returns the number of unique users who have starred the message.
'''
res = self.base.query('stars', ('server',), (('server', int(self.server)), ('messid', int(self.messid))))
return len(res)
def hasStarred(self, uid):
''' Determines whether the provided user has previously starred the
message.
'''
res = self.base.query('stars', ('server',), (('server', int(self.server)), ('messid', int(self.messid)), ('starrer', int(uid))))
return len(res) > 0
def constructEmbed(self, messageObject):
''' Constructs the embed object to be used in the starboard message.
'''
embed = discord.Embed(colour=discord.Colour(0x7f3e96), description=messageObject.content, timestamp=messageObject.timestamp)
embed.set_author(name=messageObject.author.name, icon_url=messageObject.author.avatar_url)
if messageObject.attachments and len(messageObject.attachments) > 0:
attachment = messageObject.attachments[0]
if attachment['filename'].lower().endswith(('gif', 'jpg', 'jpeg', 'png')):
embed.set_image(url=attachment['url'])
else:
embed.add_field(name='Also attached',value=str(attachment['filename']))
return embed
async def updateStarboard(self, serverObject, channelObject, bot):
''' Check if we're above the threshold value for the starboard. If so,
ask to be posted.
'''
s = Starboard(serverObject, bot)
        # Guard against an unconfigured starboard (thresh would be None).
        if s.isWorking() and self.getStars() >= s.thresh:
await s._postOrUpdateStarboardMessage(self, channelObject)
|
usc-isi/extra-specs
|
refs/heads/master
|
nova/virt/vmwareapi_conn.py
|
2
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware ESX platform.
**Related Flags**
:vmwareapi_host_ip: IPAddress of VMware ESX server.
:vmwareapi_host_username: Username for connection to VMware ESX Server.
:vmwareapi_host_password: Password for connection to VMware ESX Server.
:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks
(default: 1.0).
:vmwareapi_api_retry_count: The API retry count in case of failure such as
network failures (socket errors etc.)
(default: 10).
"""
import time
from eventlet import event
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
               help='URL for connection to VMWare ESX host. Required if '
                    'connection_type is vmwareapi.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
help='Username for connection to VMWare ESX host. '
'Used only if connection_type is vmwareapi.'),
cfg.StrOpt('vmwareapi_host_password',
default=None,
help='Password for connection to VMWare ESX host. '
'Used only if connection_type is vmwareapi.'),
cfg.FloatOpt('vmwareapi_task_poll_interval',
default=5.0,
help='The interval used for polling of remote tasks. '
'Used only if connection_type is vmwareapi'),
cfg.IntOpt('vmwareapi_api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if connection_type is vmwareapi'),
cfg.StrOpt('vmwareapi_vlan_interface',
default='vmnic0',
help='Physical ethernet adapter name for vlan networking'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(vmwareapi_opts)
TIME_BETWEEN_API_CALL_RETRIES = 2.0
class Failure(Exception):
"""Base Exception class for handling task failures."""
def __init__(self, details):
self.details = details
def __str__(self):
return str(self.details)
def get_connection(_read_only):
"""Sets up the ESX host connection."""
host_ip = FLAGS.vmwareapi_host_ip
host_username = FLAGS.vmwareapi_host_username
host_password = FLAGS.vmwareapi_host_password
api_retry_count = FLAGS.vmwareapi_api_retry_count
if not host_ip or host_username is None or host_password is None:
        raise Exception(_("Must specify vmwareapi_host_ip, "
                          "vmwareapi_host_username "
                          "and vmwareapi_host_password to use "
                          "connection_type=vmwareapi"))
return VMWareESXConnection(host_ip, host_username, host_password,
api_retry_count)
class VMWareESXConnection(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
super(VMWareESXConnection, self).__init__()
session = VMWareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
self._vmops = vmops.VMWareVMOps(session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
# FIXME(sateesh): implement this
pass
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, network_info,
block_device_mapping=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, network_info)
def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_info(instance)
def get_console_output(self, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_volume_connector(self, _instance):
"""Return volume connector information"""
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
'ip': FLAGS.vmwareapi_host_ip,
'initiator': None,
'host': None
}
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach volume storage to VM instance."""
pass
    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage from VM instance."""
        pass
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
return {'address': FLAGS.vmwareapi_host_ip,
'username': FLAGS.vmwareapi_host_username,
'password': FLAGS.vmwareapi_host_password}
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance, network_info)
class VMWareAPISession(object):
"""
Sets up a session with the ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
self._host_ip = host_ip
self._host_username = host_username
self._host_password = host_password
self.api_retry_count = api_retry_count
self._scheme = scheme
self._session_id = None
self.vim = None
self._create_session()
def _get_vim_object(self):
"""Create the VIM Object instance."""
return vim.Vim(protocol=self._scheme, host=self._host_ip)
def _create_session(self):
"""Creates a session with the ESX host."""
while True:
try:
# Login and setup the session with the ESX host for making
# API calls
self.vim = self._get_vim_object()
session = self.vim.Login(
self.vim.get_service_content().sessionManager,
userName=self._host_username,
password=self._host_password)
                # Terminate the earlier session, if possible (for the sake
                # of preserving sessions, as there is a limit to the number
                # of sessions we can have)
if self._session_id:
try:
self.vim.TerminateSession(
self.vim.get_service_content().sessionManager,
sessionId=[self._session_id])
except Exception, excep:
# This exception is something we can live with. It is
# just an extra caution on our side. The session may
# have been cleared. We could have made a call to
# SessionIsActive, but that is an overhead because we
# anyway would have to call TerminateSession.
LOG.debug(excep)
self._session_id = session.key
return
except Exception, excep:
LOG.critical(_("In vmwareapi:_create_session, "
"got this exception: %s") % excep)
raise exception.NovaException(excep)
def __del__(self):
"""Logs-out the session."""
        # Logout to avoid an unnecessary increase in session count at the
        # ESX host
try:
self.vim.Logout(self.vim.get_service_content().sessionManager)
except Exception, excep:
# It is just cautionary on our part to do a logout in del just
# to ensure that the session is not left active.
LOG.debug(excep)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""
Calls a method within the module specified with
args provided.
"""
args = list(args)
retry_count = 0
exc = None
last_fault_list = []
while True:
try:
if not self._is_vim_object(module):
# If it is not the first try, then get the latest
# vim object
if retry_count > 0:
args = args[1:]
args = [self.vim] + args
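                    # Illustrative example: a call such as
                    # _call_method(vim_util, "get_objects", "VirtualMachine")
                    # is dispatched as vim_util.get_objects(self.vim,
                    # "VirtualMachine"), with a fresh vim handle prepended
                    # on every retry in place of the stale one.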
retry_count += 1
temp_module = module
for method_elem in method.split("."):
temp_module = getattr(temp_module, method_elem)
return temp_module(*args, **kwargs)
except error_util.VimFaultException, excep:
# If it is a Session Fault Exception, it may point
# to a session gone bad. So we try re-creating a session
# and then proceeding ahead with the call.
exc = excep
if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
                    # An idle session returns an empty
                    # RetrievePropertiesResponse, but so does a legitimately
                    # empty query result (e.g. no VMs on the host), so we
                    # have no way to differentiate the two.
                    # So if the previous response was also an empty response
                    # and, after creating a new session, we still get an
                    # empty response, then we can be reasonably sure the
                    # response really is meant to be empty.
if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
return []
last_fault_list = excep.fault_list
self._create_session()
else:
                    # No retrying when the API call went through and the
                    # failure is the caller's fault; the caller should
                    # handle these errors, e.g. an InvalidArgument fault.
break
except error_util.SessionOverLoadException, excep:
# For exceptions which may come because of session overload,
# we retry
exc = excep
except Exception, excep:
                # If it is a proper exception, say due to improper data
                # furnished in the SOAP call or the retry limit having been
                # exceeded, we raise the exception
exc = excep
break
# If retry count has been reached then break and
# raise the exception
if retry_count > self.api_retry_count:
break
time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
LOG.critical(_("In vmwareapi:_call_method, "
"got this exception: %s") % exc)
raise
def _get_vim(self):
"""Gets the VIM object reference."""
if self.vim is None:
self._create_session()
return self.vim
def _wait_for_task(self, instance_uuid, task_ref):
"""
Return a Deferred that will give the result of the given task.
The task is polled until it completes.
"""
done = event.Event()
loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
done)
loop.start(FLAGS.vmwareapi_task_poll_interval)
ret_val = done.wait()
loop.stop()
return ret_val
    def _poll_task(self, instance_uuid, task_ref, done):
        """
        Polls the given task and fires the given Deferred when a result
        is available.
        """
try:
task_info = self._call_method(vim_util, "get_dynamic_property",
task_ref, "Task", "info")
task_name = task_info.name
if task_info.state in ['queued', 'running']:
return
elif task_info.state == 'success':
LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
"status: success") % locals())
done.send("success")
else:
error_info = str(task_info.error.localizedMessage)
LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
"status: error %(error_info)s") % locals())
done.send_exception(exception.NovaException(error_info))
except Exception, excep:
LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
done.send_exception(excep)
|
Floens/mpv
|
refs/heads/master
|
waftools/checks/__init__.py
|
12133432
| |
saintbird/django-cms
|
refs/heads/develop
|
cms/test_utils/project/pluginapp/plugins/meta/migrations/__init__.py
|
12133432
| |
rahushen/ansible
|
refs/heads/devel
|
test/units/playbook/role/__init__.py
|
12133432
| |
hotpxl/caffe-m2
|
refs/heads/master
|
python/caffe/detection/__init__.py
|
12133432
| |
ylcrow/poweron
|
refs/heads/master
|
src/config/__init__.py
|
12133432
| |
ravindrapanda/tensorflow
|
refs/heads/master
|
tensorflow/contrib/hooks/__init__.py
|
147
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""hooks: A module containing `SessionRunHook`s for use with `MonitoredSession`.
@@ProfilerHook
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.hooks.python.training import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['ProfilerHook']
remove_undocumented(__name__, _allowed_symbols)
|
openpathsampling/openpathsampling
|
refs/heads/master
|
openpathsampling/analysis/__init__.py
|
3
|
from .path_histogram import PathHistogram, PathDensityHistogram
from .channel_analysis import ChannelAnalysis
from .replica_network import ReplicaNetwork, ReplicaNetworkGraph
from .shooting_point_analysis import ShootingPointAnalysis
from . import tis
from . import tools
|
DICENetworks/DICE-pjsip
|
refs/heads/dice_master
|
tests/pjsua/scripts-sendto/300_srtp_receive_no_key_2.py
|
42
|
# $Id$
import inc_sip as sip
import inc_sdp as sdp
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=tester
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
a=crypto:0 AES_CM_128_HMAC_SHA1_80 inline
"""
args = "--null-audio --auto-answer 200 --max-calls 1 --use-srtp 1 --srtp-secure 0"
include = []
exclude = []
sendto_cfg = sip.SendtoCfg( "caller sends crypto attr without key, callee must not accept the call",
pjsua_args=args, sdp=sdp, resp_code=406,
resp_inc=include, resp_exc=exclude)
|
cortedeltimo/SickRage
|
refs/heads/master
|
lib/twilio/rest/resources/recordings.py
|
23
|
from .util import normalize_dates
from .transcriptions import Transcriptions
from .base import InstanceResource, ListResource
class Recording(InstanceResource):
subresources = [Transcriptions]
def __init__(self, *args, **kwargs):
super(Recording, self).__init__(*args, **kwargs)
self.formats = {
"mp3": self.uri + ".mp3",
"wav": self.uri + ".wav",
}
def delete(self):
"""
Delete this recording
"""
return self.delete_instance()
class Recordings(ListResource):
name = "Recordings"
instance = Recording
@normalize_dates
def list(self, before=None, after=None, **kwargs):
"""
Returns a page of :class:`Recording` resources as a list.
For paging information see :class:`ListResource`.
:param date after: Only list recordings logged after this datetime
        :param date before: Only list recordings logged before this datetime
:param call_sid: Only list recordings from this :class:`Call`
"""
kwargs["DateCreated<"] = before
kwargs["DateCreated>"] = after
return self.get_instances(kwargs)
@normalize_dates
def iter(self, before=None, after=None, **kwargs):
"""
Returns an iterator of :class:`Recording` resources.
:param date after: Only list recordings logged after this datetime
        :param date before: Only list recordings logged before this datetime
"""
kwargs["DateCreated<"] = before
kwargs["DateCreated>"] = after
return super(Recordings, self).iter(**kwargs)
def delete(self, sid):
"""
Delete the given recording
"""
return self.delete_instance(sid)
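# Illustrative usage sketch (the constructor arguments and SIDs below are
# assumptions, following this library's ListResource conventions):
#
#   recordings = Recordings("https://api.twilio.com/2010-04-01/Accounts/AC...", auth)
#   for rec in recordings.iter(after=date(2014, 1, 1)):
#       print(rec.formats["mp3"])  # direct URI of the mp3 rendition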
|
cs-au-dk/Artemis
|
refs/heads/master
|
WebKit/Source/WebKit2/Scripts/generate-messages-header.py
|
145
|
#!/usr/bin/env python
#
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import sys
import webkit2.messages
def main(argv=None):
if not argv:
argv = sys.argv
input_path = argv[1]
with open(input_path) as input_file:
# Python 3, change to: print(webkit2.messages.generate_messages_header(input_file), end='')
sys.stdout.write(webkit2.messages.generate_messages_header(input_file))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
isandlaTech/cohorte-demos
|
refs/heads/dev
|
led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-1.0.0-20141201.234602-19-python-distribution/repo/sleekxmpp/plugins/xep_0270.py
|
14
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins import BasePlugin, register_plugin
class XEP_0270(BasePlugin):
name = 'xep_0270'
description = 'XEP-0270: XMPP Compliance Suites 2010'
dependencies = set(['xep_0030', 'xep_0115', 'xep_0054',
'xep_0163', 'xep_0045', 'xep_0085'])
register_plugin(XEP_0270)
|
rmfitzpatrick/ansible
|
refs/heads/devel
|
lib/ansible/plugins/terminal/ce.py
|
122
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(r'[\r\n]?<.+>(?:\s*)$'),
re.compile(r'[\r\n]?\[.+\](?:\s*)$'),
]
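    # Illustrative example: a CloudEngine user view prompt such as '<HUAWEI>'
    # matches the first pattern, and a system view prompt such as
    # '[HUAWEI-GigabitEthernet0/0/1]' matches the second.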
terminal_stderr_re = [
re.compile(r"% ?Error: "),
re.compile(r"^% \w+", re.M),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
re.compile(r"syntax error"),
re.compile(r"unknown command"),
re.compile(r"Error\[\d+\]: ", re.I),
re.compile(r"Error:", re.I)
]
def on_open_shell(self):
try:
self._exec_cli_command('screen-length 0 temporary')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
|
sch3m4/intelmq
|
refs/heads/master
|
intelmq/tests/bots/parsers/vxvault/__init__.py
|
12133432
| |
fgirault/smeuhsocial
|
refs/heads/master
|
apps/threadedcomments_extras/__init__.py
|
12133432
| |
astyl/wxPlotLab
|
refs/heads/master
|
mplotlab/models/slides.py
|
2
|
# -*- coding: utf-8 -*-
from mplotlab.models.abcmodels import AModel
from mplotlab.utils.abctypes import STRING, LIST, INT, RegisterType
class ASlide(AModel):
    parametersInfo = list(AModel.parametersInfo)
    parametersInfo.extend([
        ("title", STRING, lambda: "", "figure title"),
        ("projections", LIST, lambda: [], "projections"),
        ("animation_period", INT, lambda: 500, "refresh animation period in ms"),
    ])
class Slide(ASlide):
pass
# Atype Registration
RegisterType(ASlide)
RegisterType(Slide)
|
kevclarx/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_external_providers_facts.py
|
45
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_external_providers_facts
short_description: Retrieve facts about one or more oVirt/RHV external providers
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV external providers."
notes:
- "This module creates a new top-level C(ovirt_external_providers) fact, which
contains a list of external_providers."
options:
type:
description:
- "Type of the external provider."
choices: ['os_image', 'os_network', 'os_volume', 'foreman']
required: true
name:
description:
- "Name of the external provider, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all image external providers named C(glance):
- ovirt_external_providers_facts:
type: os_image
name: glance
- debug:
var: ovirt_external_providers
'''
RETURN = '''
external_host_providers:
description: "List of dictionaries of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
returned: "On success and if parameter 'type: foreman' is used."
type: list
openstack_image_providers:
description: "List of dictionaries of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
returned: "On success and if parameter 'type: os_image' is used."
type: list
openstack_volume_providers:
description: "List of dictionaries of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
returned: "On success and if parameter 'type: os_volume' is used."
type: list
openstack_network_providers:
description: "List of dictionaries of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance
                  at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
returned: "On success and if parameter 'type: os_network' is used."
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def _external_provider_service(provider_type, system_service):
if provider_type == 'os_image':
return system_service.openstack_image_providers_service()
elif provider_type == 'os_network':
return system_service.openstack_network_providers_service()
elif provider_type == 'os_volume':
return system_service.openstack_volume_providers_service()
elif provider_type == 'foreman':
return system_service.external_host_providers_service()
def main():
argument_spec = ovirt_facts_full_argument_spec(
name=dict(default=None, required=False),
type=dict(
default=None,
required=True,
choices=[
'os_image', 'os_network', 'os_volume', 'foreman',
],
aliases=['provider'],
),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
external_providers_service = _external_provider_service(
provider_type=module.params.pop('type'),
system_service=connection.system_service(),
)
if module.params['name']:
external_providers = [
e for e in external_providers_service.list()
if fnmatch.fnmatch(e.name, module.params['name'])
]
else:
external_providers = external_providers_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_external_providers=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in external_providers
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
Mega-DatA-Lab/mxnet
|
refs/heads/master
|
example/speech_recognition/main.py
|
44
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import sys
from collections import namedtuple
from datetime import datetime
from config_util import parse_args, parse_contexts, generate_file_path
from train import do_training
import mxnet as mx
from stt_io_iter import STTIter
from label_util import LabelUtil
from log_util import LogUtil
import numpy as np
from stt_datagenerator import DataGenerator
from stt_metric import STTMetric
from stt_bi_graphemes_util import generate_bi_graphemes_dictionary
from stt_bucketing_module import STTBucketingModule
from stt_io_bucketingiter import BucketSTTIter
sys.path.insert(0, "../../python")
# os.environ['MXNET_ENGINE_TYPE'] = "NaiveEngine"
os.environ['MXNET_ENGINE_TYPE'] = "ThreadedEnginePerDevice"
os.environ['MXNET_ENABLE_GPU_P2P'] = "0"
class WHCS:
width = 0
height = 0
channel = 0
stride = 0
class ConfigLogger(object):
def __init__(self, log):
self.__log = log
def __call__(self, config):
self.__log.info("Config:")
config.write(self)
def write(self, data):
# stripping the data makes the output nicer and avoids empty lines
line = data.strip()
self.__log.info(line)
def load_labelutil(labelUtil, is_bi_graphemes, language="en"):
if language == "en":
if is_bi_graphemes:
try:
labelUtil.load_unicode_set("resources/unicodemap_en_baidu_bi_graphemes.csv")
            except Exception:
                raise Exception("There is no resources/unicodemap_en_baidu_bi_graphemes.csv." +
                                " Please set overwrite_bi_graphemes_dictionary to True in the train section")
else:
labelUtil.load_unicode_set("resources/unicodemap_en_baidu.csv")
else:
raise Exception("Error: Language Type: %s" % language)
def load_data(args):
mode = args.config.get('common', 'mode')
if mode not in ['train', 'predict', 'load']:
        raise Exception('mode must be one of the following: train, predict, load')
batch_size = args.config.getint('common', 'batch_size')
whcs = WHCS()
whcs.width = args.config.getint('data', 'width')
whcs.height = args.config.getint('data', 'height')
whcs.channel = args.config.getint('data', 'channel')
whcs.stride = args.config.getint('data', 'stride')
save_dir = 'checkpoints'
model_name = args.config.get('common', 'prefix')
is_bi_graphemes = args.config.getboolean('common', 'is_bi_graphemes')
overwrite_meta_files = args.config.getboolean('train', 'overwrite_meta_files')
overwrite_bi_graphemes_dictionary = args.config.getboolean('train', 'overwrite_bi_graphemes_dictionary')
max_duration = args.config.getfloat('data', 'max_duration')
language = args.config.get('data', 'language')
log = LogUtil().getlogger()
labelUtil = LabelUtil.getInstance()
if mode == "train" or mode == "load":
data_json = args.config.get('data', 'train_json')
val_json = args.config.get('data', 'val_json')
datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
datagen.load_train_data(data_json, max_duration=max_duration)
datagen.load_validation_data(val_json, max_duration=max_duration)
if is_bi_graphemes:
if not os.path.isfile("resources/unicodemap_en_baidu_bi_graphemes.csv") or overwrite_bi_graphemes_dictionary:
load_labelutil(labelUtil=labelUtil, is_bi_graphemes=False, language=language)
generate_bi_graphemes_dictionary(datagen.train_texts+datagen.val_texts)
load_labelutil(labelUtil=labelUtil, is_bi_graphemes=is_bi_graphemes, language=language)
args.config.set('arch', 'n_classes', str(labelUtil.get_count()))
if mode == "train":
if overwrite_meta_files:
log.info("Generate mean and std from samples")
normalize_target_k = args.config.getint('train', 'normalize_target_k')
datagen.sample_normalize(normalize_target_k, True)
else:
log.info("Read mean and std from meta files")
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
elif mode == "load":
# get feat_mean and feat_std to normalize dataset
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
elif mode == 'predict':
test_json = args.config.get('data', 'test_json')
datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
datagen.load_train_data(test_json, max_duration=max_duration)
        load_labelutil(labelUtil, is_bi_graphemes, language="en")
args.config.set('arch', 'n_classes', str(labelUtil.get_count()))
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
if batch_size == 1 and is_batchnorm and (mode == 'train' or mode == 'load'):
raise Warning('batch size 1 is too small for is_batchnorm')
# sort file paths by its duration in ascending order to implement sortaGrad
if mode == "train" or mode == "load":
max_t_count = datagen.get_max_seq_length(partition="train")
max_label_length = \
datagen.get_max_label_length(partition="train", is_bi_graphemes=is_bi_graphemes)
elif mode == "predict":
max_t_count = datagen.get_max_seq_length(partition="test")
max_label_length = \
datagen.get_max_label_length(partition="test", is_bi_graphemes=is_bi_graphemes)
args.config.set('arch', 'max_t_count', str(max_t_count))
args.config.set('arch', 'max_label_length', str(max_label_length))
from importlib import import_module
prepare_data_template = import_module(args.config.get('arch', 'arch_file'))
init_states = prepare_data_template.prepare_data(args)
sort_by_duration = (mode == "train")
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
save_feature_as_csvfile = args.config.getboolean('train', 'save_feature_as_csvfile')
if is_bucketing:
buckets = json.loads(args.config.get('arch', 'buckets'))
data_loaded = BucketSTTIter(partition="train",
count=datagen.count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=sort_by_duration,
is_bi_graphemes=is_bi_graphemes,
buckets=buckets,
save_feature_as_csvfile=save_feature_as_csvfile)
else:
data_loaded = STTIter(partition="train",
count=datagen.count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=sort_by_duration,
is_bi_graphemes=is_bi_graphemes,
save_feature_as_csvfile=save_feature_as_csvfile)
if mode == 'train' or mode == 'load':
if is_bucketing:
validation_loaded = BucketSTTIter(partition="validation",
count=datagen.val_count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=False,
is_bi_graphemes=is_bi_graphemes,
buckets=buckets,
save_feature_as_csvfile=save_feature_as_csvfile)
else:
validation_loaded = STTIter(partition="validation",
count=datagen.val_count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=False,
is_bi_graphemes=is_bi_graphemes,
save_feature_as_csvfile=save_feature_as_csvfile)
return data_loaded, validation_loaded, args
elif mode == 'predict':
return data_loaded, args
def load_model(args, contexts, data_train):
# load model from model_name prefix and epoch of model_num_epoch with gpu contexts of contexts
mode = args.config.get('common', 'mode')
load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
is_start_from_batch = args.config.getboolean('load', 'is_start_from_batch')
from importlib import import_module
symbol_template = import_module(args.config.get('arch', 'arch_file'))
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
if mode == 'train':
if is_bucketing:
bucketing_arch = symbol_template.BucketingArch(args)
model_loaded = bucketing_arch.get_sym_gen()
else:
model_loaded = symbol_template.arch(args)
model_num_epoch = None
elif mode == 'load' or mode == 'predict':
model_file = args.config.get('common', 'model_file')
model_name = os.path.splitext(model_file)[0]
model_num_epoch = int(model_name[-4:])
if is_bucketing:
bucketing_arch = symbol_template.BucketingArch(args)
model_loaded = bucketing_arch.get_sym_gen()
else:
model_path = 'checkpoints/' + str(model_name[:-5])
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
model_loaded = mx.module.Module.load(
prefix=model_path, epoch=model_num_epoch, context=contexts,
data_names=data_names, label_names=label_names,
load_optimizer_states=load_optimizer_states)
if is_start_from_batch:
import re
            model_num_epoch = int(re.findall(r'\d+', model_file)[0])
return model_loaded, model_num_epoch
if __name__ == '__main__':
if len(sys.argv) <= 1:
        raise Exception('cfg file path must be provided, ' +
                        'e.g. python main.py examplecfg.cfg')
args = parse_args(sys.argv[1])
# set parameters from cfg file
# give random seed
random_seed = args.config.getint('common', 'random_seed')
mx_random_seed = args.config.getint('common', 'mx_random_seed')
# random seed for shuffling data list
if random_seed != -1:
np.random.seed(random_seed)
# set mx.random.seed to give seed for parameter initialization
if mx_random_seed != -1:
mx.random.seed(mx_random_seed)
else:
mx.random.seed(hash(datetime.now()))
# set log file name
log_filename = args.config.get('common', 'log_filename')
log = LogUtil(filename=log_filename).getlogger()
# set parameters from data section(common)
mode = args.config.get('common', 'mode')
if mode not in ['train', 'predict', 'load']:
        raise Exception(
            'Define mode in the cfg file first. ' +
            'It must be one of: train, predict, load.')
# get meta file where character to number conversions are defined
contexts = parse_contexts(args)
num_gpu = len(contexts)
batch_size = args.config.getint('common', 'batch_size')
    # check that the number of gpus is a positive divisor of the batch size for data parallelism
    if batch_size % num_gpu != 0:
        raise Exception('num_gpu should be a positive divisor of batch_size')
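    # Illustrative example: with batch_size = 32 and 4 GPU contexts, each
    # device receives 32 / 4 = 8 utterances per step under data parallelism.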
if mode == "train" or mode == "load":
data_train, data_val, args = load_data(args)
elif mode == "predict":
data_train, args = load_data(args)
is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
# log current config
config_logger = ConfigLogger(log)
config_logger(args.config)
# load model
model_loaded, model_num_epoch = load_model(args, contexts, data_train)
# if mode is 'train', it trains the model
if mode == 'train':
if is_bucketing:
module = STTBucketingModule(
sym_gen=model_loaded,
default_bucket_key=data_train.default_bucket_key,
context=contexts
)
else:
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
module = mx.mod.Module(model_loaded, context=contexts,
data_names=data_names, label_names=label_names)
do_training(args=args, module=module, data_train=data_train, data_val=data_val)
# if mode is 'load', it loads model from the checkpoint and continues the training.
elif mode == 'load':
do_training(args=args, module=model_loaded, data_train=data_train, data_val=data_val,
begin_epoch=model_num_epoch + 1)
# if mode is 'predict', it predict label from the input by the input model
elif mode == 'predict':
# predict through data
if is_bucketing:
max_t_count = args.config.getint('arch', 'max_t_count')
load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
model_file = args.config.get('common', 'model_file')
model_name = os.path.splitext(model_file)[0]
model_num_epoch = int(model_name[-4:])
model_path = 'checkpoints/' + str(model_name[:-5])
model = STTBucketingModule(
sym_gen=model_loaded,
default_bucket_key=data_train.default_bucket_key,
context=contexts
)
model.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label,
for_training=True)
_, arg_params, aux_params = mx.model.load_checkpoint(model_path, model_num_epoch)
model.set_params(arg_params, aux_params)
model_loaded = model
else:
model_loaded.bind(for_training=False, data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label)
max_t_count = args.config.getint('arch', 'max_t_count')
eval_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu)
if is_batchnorm:
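            # forward with is_train=False so BatchNorm uses its moving
            # statistics instead of per-batch statistics during evaluation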
for nbatch, data_batch in enumerate(data_train):
model_loaded.forward(data_batch, is_train=False)
model_loaded.update_metric(eval_metric, data_batch.label)
else:
#model_loaded.score(eval_data=data_train, num_batch=None,
# eval_metric=eval_metric, reset=True)
for nbatch, data_batch in enumerate(data_train):
model_loaded.forward(data_batch, is_train=False)
model_loaded.update_metric(eval_metric, data_batch.label)
    else:
        raise Exception(
            'mode must be defined in the cfg file as one of: train, predict or load.')
|
donaloconnor/bitcoin
|
refs/heads/master
|
test/functional/feature_versionbits_warning.py
|
13
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test version bits warning system.
Generate chains with block versions that appear to be signalling unknown
soft-forks, and test that warning alerts are generated.
"""
import os
import re
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import msg_block
from test_framework.mininode import P2PInterface, network_thread_start, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
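# e.g. 0x20000000 | (1 << 27) == 0x28000000, signalling the unassigned bit 27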
WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect"
WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
VB_PATTERN = re.compile("Warning: unknown new rules activated.*versionbit")
class VersionBitsWarningTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
# Open and close to create zero-length file
with open(self.alert_filename, 'w', encoding='utf8'):
pass
self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
self.setup_nodes()
def send_blocks_with_version(self, peer, numblocks, version):
"""Send numblocks blocks to peer with version set"""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount()
block_time = self.nodes[0].getblockheader(tip)["time"] + 1
tip = int(tip, 16)
for _ in range(numblocks):
block = create_block(tip, create_coinbase(height + 1), block_time)
block.nVersion = version
block.solve()
peer.send_message(msg_block(block))
block_time += 1
height += 1
tip = block.sha256
peer.sync_with_ping()
def versionbits_in_alert_file(self):
"""Test that the versionbits warning has been written to the alert file."""
        with open(self.alert_filename, 'r', encoding='utf8') as alert_file:
            alert_text = alert_file.read()
return VB_PATTERN.search(alert_text) is not None
def run_test(self):
# Handy alias
node = self.nodes[0]
node.add_p2p_connection(P2PInterface())
network_thread_start()
node.p2p.wait_for_verack()
# Mine one period worth of blocks
node.generate(VB_PERIOD)
self.log.info("Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version.")
# Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
self.send_blocks_with_version(node.p2p, VB_THRESHOLD - 1, VB_UNKNOWN_VERSION)
node.generate(VB_PERIOD - VB_THRESHOLD + 1)
# Check that we're not getting any versionbit-related errors in get*info()
assert(not VB_PATTERN.match(node.getmininginfo()["warnings"]))
assert(not VB_PATTERN.match(node.getnetworkinfo()["warnings"]))
self.log.info("Check that there is a warning if >50 blocks in the last 100 were an unknown version")
# Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
self.send_blocks_with_version(node.p2p, VB_THRESHOLD, VB_UNKNOWN_VERSION)
node.generate(VB_PERIOD - VB_THRESHOLD)
# Check that get*info() shows the 51/100 unknown block version error.
assert(WARN_UNKNOWN_RULES_MINED in node.getmininginfo()["warnings"])
assert(WARN_UNKNOWN_RULES_MINED in node.getnetworkinfo()["warnings"])
self.log.info("Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version.")
# Mine a period worth of expected blocks so the generic block-version warning
# is cleared. This will move the versionbit state to ACTIVE.
node.generate(VB_PERIOD)
# Stop-start the node. This is required because bitcoind will only warn once about unknown versions or unknown rules activating.
self.restart_node(0)
# Generating one block guarantees that we'll get out of IBD
node.generate(1)
wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=mininode_lock)
# Generating one more block will be enough to generate an error.
node.generate(1)
# Check that get*info() shows the versionbits unknown rules warning
assert(WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"])
assert(WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"])
# Check that the alert file shows the versionbits unknown rules warning
wait_until(lambda: self.versionbits_in_alert_file(), timeout=60)
if __name__ == '__main__':
VersionBitsWarningTest().main()
|
xiandiancloud/edx-platform
|
refs/heads/master
|
common/test/acceptance/tests/test_studio_acid_xblock.py
|
10
|
"""
Acceptance tests for Studio related to the acid xblock.
"""
from unittest import skip
from nose.plugins.attrib import attr
from bok_choy.web_app_test import WebAppTest
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.overview import CourseOutlinePage
from ..pages.xblock.acid import AcidView
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
@attr('shard_1')
class XBlockAcidBase(WebAppTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
# Define a unique course identifier
self.course_info = {
'org': 'test_org',
'number': 'course_' + self.unique_id[:5],
'run': 'test_' + self.unique_id,
'display_name': 'Test Course ' + self.unique_id
}
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_id = '{org}.{number}.{run}'.format(**self.course_info)
self.setup_fixtures()
self.auth_page = AutoAuthPage(
self.browser,
staff=False,
username=self.user.get('username'),
email=self.user.get('email'),
password=self.user.get('password')
)
self.auth_page.visit()
def validate_acid_block_preview(self, acid_block):
"""
Validate the Acid Block's preview
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.toggle_expand().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
def test_acid_block_editor(self):
"""
Verify that all expected acid block tests pass in studio editor
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.toggle_expand().unit('Test Unit').go_to()
acid_block = AcidView(self.browser, unit.xblocks[0].edit().editor_selector)
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('content'))
self.assertTrue(acid_block.scope_passed('settings'))
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
self.user = course_fix.user
@attr('shard_1')
class XBlockAcidParentBase(XBlockAcidBase):
"""
Base class for tests that verify that parent XBlock integration is working correctly
"""
__test__ = False
def validate_acid_block_preview(self, acid_block):
super(XBlockAcidParentBase, self).validate_acid_block_preview(acid_block)
self.assertTrue(acid_block.child_tests_passed)
def test_acid_block_preview(self):
"""
Verify that all expected acid block tests pass in studio preview
"""
self.outline.visit()
subsection = self.outline.section('Test Section').subsection('Test Subsection')
unit = subsection.toggle_expand().unit('Test Unit').go_to()
container = unit.xblocks[0].go_to_container()
acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
self.validate_acid_block_preview(acid_block)
class XBlockAcidEmptyParentTest(XBlockAcidParentBase):
"""
    Tests of an acid parent block with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
)
)
)
)
).install()
self.user = course_fix.user
@attr('shard_1')
class XBlockAcidChildTest(XBlockAcidParentBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
self.user = course_fix.user
@skip('This will fail until we fix support of children in pure XBlocks')
def test_acid_block_preview(self):
super(XBlockAcidChildTest, self).test_acid_block_preview()
@skip('This will fail until we fix support of children in pure XBlocks')
def test_acid_block_editor(self):
super(XBlockAcidChildTest, self).test_acid_block_editor()
|
junalmeida/Sick-Beard
|
refs/heads/master
|
lib/requests/packages/chardet/big5prober.py
|
2930
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
|
lafiter/wwmmo
|
refs/heads/master
|
website/bs4/diagnose.py
|
431
|
"""Diagnostic functions, mainly for use when doing tech support."""
import cProfile
from StringIO import StringIO
from HTMLParser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys
def diagnose(data):
"""Diagnostic suite for isolating common problems."""
print "Diagnostic running on Beautiful Soup %s" % __version__
print "Python version %s" % sys.version
basic_parsers = ["html.parser", "html5lib", "lxml"]
    # iterate over a copy so removing missing parsers does not skip entries
    for name in list(basic_parsers):
        for builder in builder_registry.builders:
            if name in builder.features:
                break
        else:
            basic_parsers.remove(name)
            print (
                "I noticed that %s is not installed. Installing it may help." %
                name)
if 'lxml' in basic_parsers:
basic_parsers.append(["lxml", "xml"])
from lxml import etree
print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))
if 'html5lib' in basic_parsers:
import html5lib
print "Found html5lib version %s" % html5lib.__version__
if hasattr(data, 'read'):
data = data.read()
elif os.path.exists(data):
print '"%s" looks like a filename. Reading data from the file.' % data
data = open(data).read()
elif data.startswith("http:") or data.startswith("https:"):
print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data
print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup."
return
print
for parser in basic_parsers:
print "Trying to parse your markup with %s" % parser
success = False
try:
soup = BeautifulSoup(data, parser)
success = True
except Exception, e:
print "%s could not parse the markup." % parser
traceback.print_exc()
if success:
print "Here's what %s did with the markup:" % parser
print soup.prettify()
print "-" * 80
def lxml_trace(data, html=True, **kwargs):
"""Print out the lxml events that occur during parsing.
This lets you see how lxml parses a document when no Beautiful
Soup code is running.
"""
from lxml import etree
for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
print("%s, %4s, %s" % (event, element.tag, element.text))
class AnnouncingParser(HTMLParser):
"""Announces HTMLParser parse events, without doing anything else."""
def _p(self, s):
print(s)
def handle_starttag(self, name, attrs):
self._p("%s START" % name)
def handle_endtag(self, name):
self._p("%s END" % name)
def handle_data(self, data):
self._p("%s DATA" % data)
def handle_charref(self, name):
self._p("%s CHARREF" % name)
def handle_entityref(self, name):
self._p("%s ENTITYREF" % name)
def handle_comment(self, data):
self._p("%s COMMENT" % data)
def handle_decl(self, data):
self._p("%s DECL" % data)
def unknown_decl(self, data):
self._p("%s UNKNOWN-DECL" % data)
def handle_pi(self, data):
self._p("%s PI" % data)
def htmlparser_trace(data):
"""Print out the HTMLParser events that occur during parsing.
This lets you see how HTMLParser parses a document when no
Beautiful Soup code is running.
"""
parser = AnnouncingParser()
parser.feed(data)
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"
def rword(length=5):
"Generate a random word-like string."
s = ''
for i in range(length):
if i % 2 == 0:
t = _consonants
else:
t = _vowels
s += random.choice(t)
return s
def rsentence(length=4):
"Generate a random sentence-like string."
return " ".join(rword(random.randint(4,9)) for i in range(length))
def rdoc(num_elements=1000):
"""Randomly generate an invalid HTML document."""
tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
elements = []
for i in range(num_elements):
choice = random.randint(0,3)
if choice == 0:
# New tag.
tag_name = random.choice(tag_names)
elements.append("<%s>" % tag_name)
elif choice == 1:
elements.append(rsentence(random.randint(1,4)))
elif choice == 2:
# Close a tag.
tag_name = random.choice(tag_names)
elements.append("</%s>" % tag_name)
return "<html>" + "\n".join(elements) + "</html>"
def benchmark_parsers(num_elements=100000):
"""Very basic head-to-head performance benchmark."""
print "Comparative parser benchmark on Beautiful Soup %s" % __version__
data = rdoc(num_elements)
print "Generated a large invalid HTML document (%d bytes)." % len(data)
for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
success = False
try:
a = time.time()
soup = BeautifulSoup(data, parser)
b = time.time()
success = True
except Exception, e:
print "%s could not parse the markup." % parser
traceback.print_exc()
if success:
print "BS4+%s parsed the markup in %.2fs." % (parser, b-a)
from lxml import etree
a = time.time()
etree.HTML(data)
b = time.time()
print "Raw lxml parsed the markup in %.2fs." % (b-a)
import html5lib
parser = html5lib.HTMLParser()
a = time.time()
parser.parse(data)
b = time.time()
print "Raw html5lib parsed the markup in %.2fs." % (b-a)
def profile(num_elements=100000, parser="lxml"):
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
data = rdoc(num_elements)
vars = dict(bs4=bs4, data=data, parser=parser)
cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)
stats = pstats.Stats(filename)
# stats.strip_dirs()
stats.sort_stats("cumulative")
stats.print_stats('_html5lib|bs4', 50)
if __name__ == '__main__':
diagnose(sys.stdin.read())
|
ghtmtt/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsmergedfeaturerenderer.py
|
19
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMergedFeatureRenderer
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '30/12/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize, QDir, Qt
from qgis.PyQt.QtGui import QColor
from qgis.core import (QgsRenderChecker,
QgsMapSettings,
QgsVectorLayer,
QgsMergedFeatureRenderer,
QgsSingleSymbolRenderer,
QgsFillSymbol,
QgsSimpleFillSymbolLayer,
QgsCategorizedSymbolRenderer,
QgsRendererCategory,
QgsSimpleLineSymbolLayer,
QgsMarkerLineSymbolLayer,
QgsLineSymbol,
QgsTemplatedLineSymbolLayerBase,
QgsMarkerSymbol,
QgsMarkerSymbolLayer
)
from qgis.testing import unittest
from utilities import unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
class TestQgsMergedFeatureRenderer(unittest.TestCase):
def setUp(self):
self.report = "<h1>Python QgsMergedFeatureRenderer Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testSinglePolys(self):
source = QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'polys_overlapping.shp'))
self.assertTrue(source.isValid())
map_settings = QgsMapSettings()
map_settings.setExtent(source.extent())
map_settings.setDestinationCrs(source.crs())
map_settings.setLayers([source])
layer = QgsSimpleFillSymbolLayer()
layer.setStrokeColor(QColor(0, 0, 0))
layer.setStrokeWidth(1)
layer.setColor(QColor(200, 250, 50))
symbol = QgsFillSymbol([layer])
sub_renderer = QgsSingleSymbolRenderer(symbol)
source.setRenderer(QgsMergedFeatureRenderer(sub_renderer))
self.assertTrue(self.imageCheck('single_subrenderer', 'single_subrenderer', map_settings))
def testCategorizedPolys(self):
source = QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'polys_overlapping_with_cat.shp'))
self.assertTrue(source.isValid())
map_settings = QgsMapSettings()
map_settings.setExtent(source.extent())
map_settings.setDestinationCrs(source.crs())
map_settings.setLayers([source])
layer = QgsSimpleFillSymbolLayer()
layer.setStrokeColor(QColor(0, 0, 0))
layer.setStrokeWidth(1)
layer.setColor(QColor(200, 250, 50, 150))
symbol1 = QgsFillSymbol()
symbol1.changeSymbolLayer(0, layer)
layer = QgsSimpleFillSymbolLayer()
layer.setStrokeColor(QColor(0, 0, 0))
layer.setStrokeWidth(1)
layer.setColor(QColor(50, 250, 200, 150))
symbol2 = QgsFillSymbol()
symbol2.changeSymbolLayer(0, layer)
sub_renderer = QgsCategorizedSymbolRenderer('cat', [QgsRendererCategory('cat1', symbol1, 'cat1'),
QgsRendererCategory('cat2', symbol2, 'cat2')
])
source.setRenderer(QgsMergedFeatureRenderer(sub_renderer))
self.assertTrue(self.imageCheck('polys_categorizedrenderer', 'polys_categorizedrenderer', map_settings))
def testSingleLines(self):
source = QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'lines_touching.shp'))
self.assertTrue(source.isValid())
map_settings = QgsMapSettings()
map_settings.setExtent(source.extent().buffered(2))
map_settings.setDestinationCrs(source.crs())
map_settings.setLayers([source])
layer = QgsSimpleLineSymbolLayer()
layer.setColor(QColor(0, 0, 0))
layer.setWidth(1)
symbol = QgsLineSymbol([layer])
layer2 = QgsMarkerLineSymbolLayer()
layer2.setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex)
marker = QgsMarkerSymbol.createSimple({'size': '4', 'color': '255,0,0', 'outline_style': 'no'})
layer2.setSubSymbol(marker)
symbol.appendSymbolLayer(layer2)
sub_renderer = QgsSingleSymbolRenderer(symbol)
source.setRenderer(QgsMergedFeatureRenderer(sub_renderer))
self.assertTrue(self.imageCheck('lines_single_subrenderer', 'lines_single_subrenderer', map_settings))
def testLinesCategorized(self):
source = QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'lines_touching.shp'))
self.assertTrue(source.isValid())
map_settings = QgsMapSettings()
map_settings.setExtent(source.extent().buffered(2))
map_settings.setDestinationCrs(source.crs())
map_settings.setLayers([source])
layer = QgsSimpleLineSymbolLayer()
layer.setColor(QColor(0, 0, 0))
layer.setWidth(1)
symbol1 = QgsLineSymbol()
symbol1.changeSymbolLayer(0, layer.clone())
layer2 = QgsMarkerLineSymbolLayer()
layer2.setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex)
marker = QgsMarkerSymbol.createSimple({'size': '4', 'color': '255,0,0', 'outline_style': 'no'})
layer2.setSubSymbol(marker)
symbol1.appendSymbolLayer(layer2)
symbol2 = QgsLineSymbol()
symbol2.changeSymbolLayer(0, layer.clone())
layer2 = QgsMarkerLineSymbolLayer()
layer2.setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex)
marker = QgsMarkerSymbol.createSimple({'size': '4', 'color': '0,255,0', 'outline_style': 'no'})
layer2.setSubSymbol(marker)
symbol2.appendSymbolLayer(layer2)
sub_renderer = QgsCategorizedSymbolRenderer('cat', [QgsRendererCategory('cat1', symbol1, 'cat1'),
QgsRendererCategory('cat2', symbol2, 'cat2')
])
source.setRenderer(QgsMergedFeatureRenderer(sub_renderer))
self.assertTrue(self.imageCheck('lines_categorized_subrenderer', 'lines_categorized_subrenderer', map_settings))
def imageCheck(self, name, reference_image, map_settings):
map_settings.setOutputDpi(96)
self.report += "<h2>Render {}</h2>\n".format(name)
checker = QgsRenderChecker()
checker.setControlPathPrefix("mergedfeaturerenderer")
checker.setControlName("expected_" + reference_image)
checker.setMapSettings(map_settings)
checker.setColorTolerance(2)
result = checker.runTest(name, 20)
self.report += checker.report()
print(self.report)
return result
if __name__ == '__main__':
unittest.main()
|
vi/enki
|
refs/heads/master
|
tests/run_all.py
|
1
|
#!/usr/bin/env python
import unittest
import persistent_qapplication
if __name__ == "__main__":
# Look for all tests. Using test_* instead of test_*.py finds modules (test_syntax and test_indenter).
suite = unittest.TestLoader().discover('.', pattern = "test_*")
unittest.TextTestRunner(verbosity=2).run(suite)
|
ThiagoGarciaAlves/erpnext
|
refs/heads/develop
|
erpnext/accounts/report/sales_partners_commission/__init__.py
|
12133432
| |
ifduyue/django
|
refs/heads/master
|
tests/fixtures_regress/__init__.py
|
12133432
| |
schatt/zulip
|
refs/heads/master
|
zerver/management/commands/fill_memcached_caches.py
|
121
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.cache_helpers import fill_memcached_cache, cache_fillers
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--cache', dest="cache", default=None),)
help = "Populate the memcached cache of messages."
def handle(self, *args, **options):
if options["cache"] is not None:
return fill_memcached_cache(options["cache"])
for cache in cache_fillers.keys():
fill_memcached_cache(cache)
|
topiaruss/django-filer
|
refs/heads/develop
|
runtests.py
|
5
|
#!/usr/bin/env python
from filer.test_utils.cli import configure
from filer.test_utils.tmpdir import temp_dir
import argparse
import sys
import warnings
def main(verbosity=1, failfast=False, test_labels=None, migrate=False):
verbosity = int(verbosity)
with temp_dir() as STATIC_ROOT:
with temp_dir() as MEDIA_ROOT:
with temp_dir() as FILE_UPLOAD_TEMP_DIR:
from django import VERSION
use_tz = VERSION[:2] >= (1, 4)
test_suffix = ""
if VERSION[:2] >= (1, 6):
test_suffix = ".tests"
if not test_labels:
test_labels = ['filer%s' % test_suffix]
else:
test_labels = ["filer%s.%s" % (test_suffix, label) for label in test_labels]
warnings.filterwarnings(
'error', r"DateTimeField received a naive datetime",
RuntimeWarning, r'django\.db\.models\.fields')
configure(
ROOT_URLCONF='test_urls',
STATIC_ROOT=STATIC_ROOT, MEDIA_ROOT=MEDIA_ROOT,
FILE_UPLOAD_TEMP_DIR=FILE_UPLOAD_TEMP_DIR,
SOUTH_TESTS_MIGRATE=migrate,
USE_TZ=use_tz)
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=verbosity, interactive=False, failfast=failfast)
failures = test_runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--failfast', action='store_true', default=False,
dest='failfast')
parser.add_argument('--verbosity', default=1)
    parser.add_argument('--migrate', action='store_true', default=False)
parser.add_argument('test_labels', nargs='*')
args = parser.parse_args()
test_labels = ['%s' % label for label in args.test_labels]
main(verbosity=args.verbosity, failfast=args.failfast,
test_labels=test_labels, migrate=args.migrate)
|
kangkot/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Tools/scripts/setup.py
|
95
|
from distutils.core import setup
if __name__ == '__main__':
setup(
scripts=[
'byteyears.py',
'checkpyc.py',
'copytime.py',
'crlf.py',
'dutree.py',
'ftpmirror.py',
'h2py.py',
'lfcr.py',
'../i18n/pygettext.py',
'logmerge.py',
'../../Lib/tabnanny.py',
'../../Lib/timeit.py',
'untabify.py',
],
)
|
petermatyas/timelapse
|
refs/heads/master
|
video.py
|
1
|
#!/usr/bin/python
import os
import time
import getpass
#=== Config ==============================================================================================
framerate = "24"
startDay = "20160201"
stopDay = "20190128"
textSize = "100" #
resolution = "1080x720" # 1080x720|
originalRes = "2592x1944"
toSzabi = "no" # yes|no
night = "no" # yes|no
pathToPictures = "/media/" + getpass.getuser() + "/KA/camera" # external hard drive contain the pictures
#workingFolder = os.getcwd()
workingFolder = "/home/peti/timelapseWork"
#=== Config end ==========================================================================================
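# Pictures on the drive are expected to start with an 8-digit date, e.g. a
# hypothetical 20160201_1200.png; that YYYYMMDD prefix drives the per-day
# grouping and the startDay/stopDay filtering below.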
#=== Separate night pictures =========================
if (night == "yes"):
os.system("python ./night.py")
#=== Create lists =====================================
os.system("ls " + pathToPictures + " > " + workingFolder + "/list_.txt") # create file list
f3 = open(workingFolder + '/list_.txt','r') # temporary / all files
f = open(workingFolder + '/list.txt','w') # all pictures
f2 = open(workingFolder + '/list2.txt','w') # days
lineArchive = " "
for line in f3:
line = line.rstrip('\n')
if line[0]=="2":
f.write(line + "\n")
if ((lineArchive != line[0:8]) and (int(line[0:8]) >= int(startDay)) and (int(line[0:8]) <= int(stopDay)) and not(os.path.isfile(workingFolder + "/" + line[0:8] + "_" + framerate + "fps_" + resolution + ".mp4"))):
f2.write(line[0:8] + "\n")
lineArchive = line[0:8]
f3.close()
f2.close()
f.close()
os.system("rm " + workingFolder + "/list_.txt")
#=== Select pictures and convert video ================
f2 = open(workingFolder + '/list2.txt','r')
for line2 in f2:
f = open(workingFolder + '/list.txt','r')
line2 = line2.rstrip('\n')
print "copy files: " + line2
year2 = line2[0:4]
month2 = line2[4:6]
day2 = line2[6:8]
for line in f:
line = line.rstrip('\n')
year = line[0:4]
month = line[4:6]
day = line[6:8]
text = year + "." + month + "." + day
if ((year == year2) and (month == month2) and (day == day2)):
# copy to working folder
os.system("cp " + pathToPictures + "/" + line + " " + workingFolder)
# labelling
os.system("convert " + workingFolder + "/" + line + " -pointsize " + textSize + " -fill white -gravity southeast -annotate +100+100 " + text + " " + workingFolder + "/0" + line)
# remove original picture
os.system("rm " + workingFolder + "/" + line)
if (toSzabi == "yes"):
# convert original size video
os.system("ffmpeg -r " + framerate + " -i " + workingFolder + "/%*.png " + " -s hd1080 -vcodec libx264 " + workingFolder + "/" + line2 + "_" + framerate + "fps_" + "2592x1944" + ".mp4")
f.close()
f = open(workingFolder + '/list.txt','r')
line2 = line2.rstrip('\n')
year2 = line2[0:4]
month2 = line2[4:6]
day2 = line2[6:8]
for line in f:
line = line.rstrip('\n')
year = line[0:4]
month = line[4:6]
day = line[6:8]
text = year + "/" + month + "/" + day
if ((year == year2) and (month == month2) and (day == day2)):
# resize and crop picture:
os.system("convert " + workingFolder + "/0" + line + " -gravity south -crop 2592x1458+0+0 " + workingFolder + "/" + line)
# remove the temp pictures:
os.system("rm " + workingFolder + "/0" + line)
# convert video
os.system("ffmpeg -r " + framerate + " -i " + workingFolder + "/%*.png " + " -s hd1080 -vcodec libx264 " + workingFolder + "/" + line2 + "_" + framerate + "fps_" + resolution + ".mp4")
# clear pictures
os.system("rm " + workingFolder + "/*.png")
f.close()
f2.close()
#=== Merge videos =====================================
print "merge videos"
os.system("ls " + workingFolder + " | grep mp4 > videolist0.txt")
f3 = open(workingFolder + '/videolist.txt','w')
f4 = open('videolist0.txt','r')
f5 = open(workingFolder + '/videolistoriginal.txt','w')
for line in f4:
if (line[9:11] == framerate) and (int(line[0:8]) >= int(startDay)):
if (line.find(resolution) > 0):
f3.write("file " + line)
if (line.find(originalRes) > 0):
f5.write("file " + line)
f3.close()
f4.close()
f5.close()
os.system("ffmpeg -f concat -i " + workingFolder + "/videolist.txt -codec copy output_" + framerate + "fps" + resolution + ".mp4")
if (toSzabi == "yes"):
os.system("ffmpeg -f concat -i " + workingFolder + "/videolistoriginal.txt -codec copy output_" + framerate + "fps" + originalRes + "_toSzabi.mp4")
#== Clean =============================================
os.system("rm videolist0.txt")
os.system("rm " + workingFolder + "/list.txt")
os.system("rm " + workingFolder + "/list2.txt")
os.system("rm " + workingFolder + "/videolist.txt")
os.system("rm " + workingFolder + "/videolistoriginal.txt")
|
diogocs1/comps
|
refs/heads/master
|
web/addons/hr_attendance/wizard/__init__.py
|
375
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance_error
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hkariti/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vultr/vr_startup_script.py
|
39
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vr_startup_script
short_description: Manages startup scripts on Vultr.
description:
- Create, update and remove startup scripts.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- The script name.
required: true
script_type:
description:
- The script type, can not be changed once created.
default: boot
choices: [ boot, pxe ]
aliases: [ type ]
script:
description:
- The script source code.
- Required if (state=present).
state:
description:
- State of the script.
default: present
choices: [ present, absent ]
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: ensure a pxe script exists, source from a file
local_action:
module: vr_startup_script
name: my_web_script
script_type: pxe
script: "{{ lookup('file', 'path/to/script') }}"
- name: ensure a boot script exists
local_action:
module: vr_startup_script
name: vr_startup_script
script: "#!/bin/bash\necho Hello World > /root/hello"
- name: ensure a script is absent
local_action:
module: vr_startup_script
name: my_web_script
state: absent
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: string
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: string
sample: "https://api.vultr.com"
vultr_startup_script:
description: Response from Vultr API
returned: success
type: complex
contains:
id:
description: ID of the startup script.
returned: success
type: string
sample: 249395
name:
description: Name of the startup script.
returned: success
type: string
sample: my startup script
script:
description: The source code of the startup script.
returned: success
type: string
sample: "#!/bin/bash\necho Hello World > /root/hello"
script_type:
description: The type of the startup script.
returned: success
type: string
sample: pxe
date_created:
description: Date the startup script was created.
returned: success
type: string
sample: "2017-08-26 12:47:48"
date_modified:
description: Date the startup script was modified.
returned: success
type: string
sample: "2017-08-26 12:47:48"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrStartupScript(Vultr):
def __init__(self, module):
super(AnsibleVultrStartupScript, self).__init__(module, "vultr_startup_script")
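        # map raw Vultr API field names to the keys documented in RETURN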
self.returns = {
'SCRIPTID': dict(key='id'),
'type': dict(key='script_type'),
'name': dict(key='name'),
'script': dict(),
'date_created': dict(),
'date_modified': dict(),
}
def get_script(self):
scripts = self.api_query(path="/v1/startupscript/list")
name = self.module.params.get('name')
if scripts:
for script_id, script_data in scripts.items():
if script_data.get('name') == name:
return script_data
return {}
def present_script(self):
script = self.get_script()
if not script:
script = self._create_script(script)
else:
script = self._update_script(script)
return script
def _create_script(self, script):
self.result['changed'] = True
data = {
'name': self.module.params.get('name'),
'script': self.module.params.get('script'),
'type': self.module.params.get('script_type'),
}
self.result['diff']['before'] = {}
self.result['diff']['after'] = data
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/create",
method="POST",
data=data
)
script = self.get_script()
return script
def _update_script(self, script):
if script['script'] != self.module.params.get('script'):
self.result['changed'] = True
data = {
'SCRIPTID': script['SCRIPTID'],
'script': self.module.params.get('script'),
}
self.result['diff']['before'] = script
self.result['diff']['after'] = script.copy()
self.result['diff']['after'].update(data)
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/update",
method="POST",
data=data
)
script = self.get_script()
return script
def absent_script(self):
script = self.get_script()
if script:
self.result['changed'] = True
data = {
'SCRIPTID': script['SCRIPTID'],
}
self.result['diff']['before'] = script
self.result['diff']['after'] = {}
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/destroy",
method="POST",
data=data
)
return script
def main():
argument_spec = vultr_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
script=dict(),
script_type=dict(default='boot', choices=['boot', 'pxe'], aliases=['type']),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['script']),
],
supports_check_mode=True,
)
vr_script = AnsibleVultrStartupScript(module)
if module.params.get('state') == "absent":
script = vr_script.absent_script()
else:
script = vr_script.present_script()
result = vr_script.get_result(script)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
hanjihun/Car
|
refs/heads/master
|
seguroweb_com_ar/seguroweb_com_ar/spiders/categories_of_seguroweb_com_ar.py
|
1
|
import scrapy
from scrapy import Request
from collections import OrderedDict
import csv
class CategoriesOfSegurowebComAr(scrapy.Spider):
name = "categories_of_seguroweb_com_ar"
start_urls = ['http://seguroweb.com.ar/autos/']
brand_model_list_file_name = "brand_model_list.csv"
location_list_file_name = "location_list.csv"
def parse(self, response):
brand_model_list = []
location_list = []
with open(self.brand_model_list_file_name, 'rb') as brand_model_list_file:
reader = csv.reader(brand_model_list_file)
for index, row in enumerate(reader):
if index == 0:
continue
brand_model_list.append({'brand_name':row[0], 'brand_id':row[1], 'version_name':row[2], 'model_id':row[3], 'version_id':row[4], 'year':row[5], 'age':row[6]})
with open(self.location_list_file_name, 'rb') as location_list_file:
reader = csv.reader(location_list_file)
for index, row in enumerate(reader):
if index == 0:
continue
location_list.append({'prov_name':row[0], 'city_name':row[1], 'prov_id':row[2], 'city_id':row[3]})
links = []
# CHEVROLET---AGILE*1.4*LS*L/14---2013---40---12---56---120360---LA*PAMPA---LOS*OLIVOS---2---18385
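        # fields are '---'-separated, with spaces in names encoded as '*'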
for location in location_list:
for brand_model in brand_model_list:
                link = brand_model['brand_name'].replace(' ','*') + "---" + brand_model['version_name'].replace(' ','*') + "---" + brand_model['year'] + "---" + brand_model['age'] + "---" + brand_model['brand_id'] + "---" + brand_model['model_id'] + "---" + brand_model['version_id']
link = link + "---" + location['prov_name'].replace(' ','*') + "---" + location['city_name'].replace(' ','*') + "---" + location['prov_id'] + "---" + location['city_id']
links.append(link)
yield {'links':links}
    ### List the zip codes and the location names, and output the result.
    ### Do not delete the code below ###
def parse_for_models(self, response):
brands = response.xpath('//*[@name="marca"]/optgroup/option')
for brand in brands:
id = brand.xpath('@value').extract_first()
name = brand.xpath('text()').extract_first()
info = {'brand_id':id, 'brand_name':name}
url = "http://seguroweb.com.ar/autos/version.php?id={}&id2=undefined".format(id)
yield Request(url, self.get_models, meta={'info':info})
def get_models(self, response):
models = response.xpath('//*[@name="version"]/option[position()>1]')
info = response.meta['info']
for model in models:
id = model.xpath('@value').extract_first()
name = model.xpath('text()').extract_first()
info1 = {'model_id':id, 'model_name':name}
info1.update(info)
url = "http://seguroweb.com.ar/autos/modelo.php?id={}&id2={}".format(id, info['brand_id'])
yield Request(url, self.get_versions, meta={'info':info1})
def get_versions(self, response):
versions = response.xpath('//*[@name="modelo"]/option[position()>1]')
info = response.meta['info']
for version in versions:
id = version.xpath('@value').extract_first()
name = version.xpath('text()').extract_first()
info1 = {'version_id':id, 'version_name':name}
info1.update(info)
url = "http://seguroweb.com.ar/autos/anio.php?id={}&id2=undefined".format(id)
yield Request(url, self.get_years, meta={'info':info1})
def get_years(self, response):
years = response.xpath('//*[@name="anio"]/option[position()>1]')
info = response.meta['info']
print info
for year in years:
id = year.xpath('@value').extract_first()
name = year.xpath('text()').extract_first()
item = OrderedDict()
item['brand_name'] = info['brand_name']
item['brand_id'] = info['brand_id']
item['version_name'] = info['version_name']
item['model_id'] = info['model_id']
item['version_id'] = info['version_id']
item['year'] = id
item['age'] = "40"
yield item
def parse_for_location(self, response):
print "##"
provinces = response.xpath('//*[@name="provincia"]/option[position()>1]')
for province in provinces:
id = province.xpath('@value').extract_first()
name = province.xpath('text()').extract_first()
url = "http://seguroweb.com.ar/autos/localidad.php?id={}&id2=undefined".format(id)
yield Request(url, self.get_cities, meta={'province_id':id, 'province_name':name})
def get_cities(self, response):
cities = response.xpath('//*[@name="localidad"]/option[position()>1]')
for city in cities:
id = city.xpath('./@value').extract_first()
name = city.xpath('./text()').extract_first()
yield {'prov_name':response.meta['province_name'], 'city_name':name, 'prov_id':response.meta['province_id'], 'city_id':id}
|
fredericlepied/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/exoscale.py
|
88
|
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
# import module snippets
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six import integer_types, string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import fetch_url
EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1"
def exo_dns_argument_spec():
return dict(
api_key=dict(default=None, no_log=True),
api_secret=dict(default=None, no_log=True),
api_timeout=dict(type='int', default=10),
api_region=dict(default='cloudstack'),
validate_certs=dict(default='yes', type='bool'),
)
def exo_dns_required_together():
return [['api_key', 'api_secret']]
class ExoDns(object):
def __init__(self, module):
self.module = module
        self.api_key = self.module.params.get('api_key')
self.api_secret = self.module.params.get('api_secret')
if not (self.api_key and self.api_secret):
try:
region = self.module.params.get('api_region')
config = self.read_config(ini_group=region)
self.api_key = config['key']
self.api_secret = config['secret']
except Exception:
e = get_exception()
self.module.fail_json(msg="Error while processing config: %s" % e)
self.headers = {
'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret),
'Content-Type': 'application/json',
'Accept': 'application/json',
}
self.result = {
'changed': False,
'diff': {
'before': {},
'after': {},
}
}
def read_config(self, ini_group=None):
if not ini_group:
ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack')
keys = ['key', 'secret']
env_conf = {}
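        # for/else: the else clause runs only if the loop finished without
        # break, i.e. every CLOUDSTACK_* key was present in the environment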
for key in keys:
if 'CLOUDSTACK_%s' % key.upper() not in os.environ:
break
else:
env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()]
else:
return env_conf
# Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini
# Last read wins in configparser
paths = (
os.path.join(os.path.expanduser('~'), '.cloudstack.ini'),
os.path.join(os.getcwd(), 'cloudstack.ini'),
)
# Look at CLOUDSTACK_CONFIG first if present
if 'CLOUDSTACK_CONFIG' in os.environ:
paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),)
if not any([os.path.exists(c) for c in paths]):
self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths))
conf = configparser.ConfigParser()
conf.read(paths)
return dict(conf.items(ini_group))
def api_query(self, resource="/domains", method="GET", data=None):
url = EXO_DNS_BASEURL + resource
if data:
data = self.module.jsonify(data)
response, info = fetch_url(
module=self.module,
url=url,
data=data,
method=method,
headers=self.headers,
timeout=self.module.params.get('api_timeout'),
)
if info['status'] not in (200, 201, 204):
self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
try:
return self.module.from_json(to_text(response.read()))
except Exception:
e = get_exception()
self.module.fail_json(msg="Could not process response into json: %s" % e)
def has_changed(self, want_dict, current_dict, only_keys=None):
changed = False
for key, value in want_dict.items():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue
# Skip None values
if value is None:
continue
if key in current_dict:
if isinstance(current_dict[key], integer_types):
if value != current_dict[key]:
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
changed = True
elif isinstance(current_dict[key], string_types):
if value.lower() != current_dict[key].lower():
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
changed = True
else:
self.module.fail_json(msg="Unable to determine comparison for key %s" % key)
else:
self.result['diff']['after'][key] = value
changed = True
return changed
|
Belxjander/Kirito
|
refs/heads/master
|
Python-3.5.0-main/Lib/idlelib/Debugger.py
|
76
|
import os
import bdb
from tkinter import *
from idlelib.WindowList import ListedToplevel
from idlelib.ScrolledList import ScrolledList
from idlelib import macosxSupport
class Idb(bdb.Bdb):
def __init__(self, gui):
self.gui = gui
bdb.Bdb.__init__(self)
def user_line(self, frame):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame)
def user_exception(self, frame, info):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame, info)
def in_rpc_code(self, frame):
if frame.f_code.co_filename.count('rpc.py'):
return True
else:
prev_frame = frame.f_back
if prev_frame.f_code.co_filename.count('Debugger.py'):
# (that test will catch both Debugger.py and RemoteDebugger.py)
return False
return self.in_rpc_code(prev_frame)
def __frame2message(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
basename = os.path.basename(filename)
message = "%s:%s" % (basename, lineno)
if code.co_name != "?":
message = "%s: %s()" % (message, code.co_name)
return message
class Debugger:
vstack = vsource = vlocals = vglobals = None
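    # stored on the class (see make_gui) so the checkbox settings persist
    # across debugger sessions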
def __init__(self, pyshell, idb=None):
if idb is None:
idb = Idb(self)
self.pyshell = pyshell
self.idb = idb
self.frame = None
self.make_gui()
self.interacting = 0
def run(self, *args):
try:
self.interacting = 1
return self.idb.run(*args)
finally:
self.interacting = 0
def close(self, event=None):
if self.interacting:
self.top.bell()
return
if self.stackviewer:
self.stackviewer.close(); self.stackviewer = None
# Clean up pyshell if user clicked debugger control close widget.
# (Causes a harmless extra cycle through close_debugger() if user
# toggled debugger from pyshell Debug menu)
self.pyshell.close_debugger()
# Now close the debugger control window....
self.top.destroy()
def make_gui(self):
pyshell = self.pyshell
self.flist = pyshell.flist
self.root = root = pyshell.root
self.top = top = ListedToplevel(root)
self.top.wm_title("Debug Control")
self.top.wm_iconname("Debug")
top.wm_protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<Escape>", self.close)
#
self.bframe = bframe = Frame(top)
self.bframe.pack(anchor="w")
self.buttons = bl = []
#
self.bcont = b = Button(bframe, text="Go", command=self.cont)
bl.append(b)
self.bstep = b = Button(bframe, text="Step", command=self.step)
bl.append(b)
self.bnext = b = Button(bframe, text="Over", command=self.next)
bl.append(b)
self.bret = b = Button(bframe, text="Out", command=self.ret)
bl.append(b)
self.bret = b = Button(bframe, text="Quit", command=self.quit)
bl.append(b)
#
for b in bl:
b.configure(state="disabled")
b.pack(side="left")
#
self.cframe = cframe = Frame(bframe)
self.cframe.pack(side="left")
#
if not self.vstack:
self.__class__.vstack = BooleanVar(top)
self.vstack.set(1)
self.bstack = Checkbutton(cframe,
text="Stack", command=self.show_stack, variable=self.vstack)
self.bstack.grid(row=0, column=0)
if not self.vsource:
self.__class__.vsource = BooleanVar(top)
self.bsource = Checkbutton(cframe,
text="Source", command=self.show_source, variable=self.vsource)
self.bsource.grid(row=0, column=1)
if not self.vlocals:
self.__class__.vlocals = BooleanVar(top)
self.vlocals.set(1)
self.blocals = Checkbutton(cframe,
text="Locals", command=self.show_locals, variable=self.vlocals)
self.blocals.grid(row=1, column=0)
if not self.vglobals:
self.__class__.vglobals = BooleanVar(top)
self.bglobals = Checkbutton(cframe,
text="Globals", command=self.show_globals, variable=self.vglobals)
self.bglobals.grid(row=1, column=1)
#
self.status = Label(top, anchor="w")
self.status.pack(anchor="w")
self.error = Label(top, anchor="w")
self.error.pack(anchor="w", fill="x")
self.errorbg = self.error.cget("background")
#
self.fstack = Frame(top, height=1)
self.fstack.pack(expand=1, fill="both")
self.flocals = Frame(top)
self.flocals.pack(expand=1, fill="both")
self.fglobals = Frame(top, height=1)
self.fglobals.pack(expand=1, fill="both")
#
if self.vstack.get():
self.show_stack()
if self.vlocals.get():
self.show_locals()
if self.vglobals.get():
self.show_globals()
def interaction(self, message, frame, info=None):
self.frame = frame
self.status.configure(text=message)
#
if info:
type, value, tb = info
try:
m1 = type.__name__
except AttributeError:
m1 = "%s" % str(type)
if value is not None:
try:
m1 = "%s: %s" % (m1, str(value))
except:
pass
bg = "yellow"
else:
m1 = ""
tb = None
bg = self.errorbg
self.error.configure(text=m1, background=bg)
#
sv = self.stackviewer
if sv:
stack, i = self.idb.get_stack(self.frame, tb)
sv.load_stack(stack, i)
#
self.show_variables(1)
#
if self.vsource.get():
self.sync_source_line()
#
for b in self.buttons:
b.configure(state="normal")
#
self.top.wakeup()
self.root.mainloop()
#
for b in self.buttons:
b.configure(state="disabled")
self.status.configure(text="")
self.error.configure(text="", background=self.errorbg)
self.frame = None
def sync_source_line(self):
frame = self.frame
if not frame:
return
filename, lineno = self.__frame2fileline(frame)
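        # skip pseudo-filenames such as "<string>", which are wrapped in angle brackets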
if filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
self.flist.gotofileline(filename, lineno)
def __frame2fileline(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
return filename, lineno
def cont(self):
self.idb.set_continue()
self.root.quit()
def step(self):
self.idb.set_step()
self.root.quit()
def next(self):
self.idb.set_next(self.frame)
self.root.quit()
def ret(self):
self.idb.set_return(self.frame)
self.root.quit()
def quit(self):
self.idb.set_quit()
self.root.quit()
stackviewer = None
def show_stack(self):
if not self.stackviewer and self.vstack.get():
self.stackviewer = sv = StackViewer(self.fstack, self.flist, self)
if self.frame:
stack, i = self.idb.get_stack(self.frame, None)
sv.load_stack(stack, i)
else:
sv = self.stackviewer
if sv and not self.vstack.get():
self.stackviewer = None
sv.close()
self.fstack['height'] = 1
def show_source(self):
if self.vsource.get():
self.sync_source_line()
def show_frame(self, stackitem):
self.frame = stackitem[0] # lineno is stackitem[1]
self.show_variables()
localsviewer = None
globalsviewer = None
def show_locals(self):
lv = self.localsviewer
if self.vlocals.get():
if not lv:
self.localsviewer = NamespaceViewer(self.flocals, "Locals")
else:
if lv:
self.localsviewer = None
lv.close()
self.flocals['height'] = 1
self.show_variables()
def show_globals(self):
gv = self.globalsviewer
if self.vglobals.get():
if not gv:
self.globalsviewer = NamespaceViewer(self.fglobals, "Globals")
else:
if gv:
self.globalsviewer = None
gv.close()
self.fglobals['height'] = 1
self.show_variables()
def show_variables(self, force=0):
lv = self.localsviewer
gv = self.globalsviewer
frame = self.frame
if not frame:
ldict = gdict = None
else:
ldict = frame.f_locals
gdict = frame.f_globals
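        # At module level f_locals and f_globals are the same dict; blank
        # the locals view so the namespace is only rendered once.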
if lv and gv and ldict is gdict:
ldict = None
if lv:
lv.load_dict(ldict, force, self.pyshell.interp.rpcclt)
if gv:
gv.load_dict(gdict, force, self.pyshell.interp.rpcclt)
def set_breakpoint_here(self, filename, lineno):
self.idb.set_break(filename, lineno)
def clear_breakpoint_here(self, filename, lineno):
self.idb.clear_break(filename, lineno)
def clear_file_breaks(self, filename):
self.idb.clear_all_file_breaks(filename)
def load_breakpoints(self):
"Load PyShellEditorWindow breakpoints into subprocess debugger"
for editwin in self.pyshell.flist.inversedict:
filename = editwin.io.filename
try:
for lineno in editwin.breakpoints:
self.set_breakpoint_here(filename, lineno)
except AttributeError:
continue
class StackViewer(ScrolledList):
def __init__(self, master, flist, gui):
if macosxSupport.isAquaTk():
            # At least with the stock AquaTk version on OSX 10.4 you'll
            # get a shaking GUI that eventually kills IDLE if the width
            # argument is specified.
ScrolledList.__init__(self, master)
else:
ScrolledList.__init__(self, master, width=80)
self.flist = flist
self.gui = gui
self.stack = []
def load_stack(self, stack, index=None):
self.stack = stack
self.clear()
for i in range(len(stack)):
frame, lineno = stack[i]
try:
modname = frame.f_globals["__name__"]
except:
modname = "?"
code = frame.f_code
filename = code.co_filename
funcname = code.co_name
import linecache
sourceline = linecache.getline(filename, lineno)
sourceline = sourceline.strip()
if funcname in ("?", "", None):
item = "%s, line %d: %s" % (modname, lineno, sourceline)
else:
item = "%s.%s(), line %d: %s" % (modname, funcname,
lineno, sourceline)
if i == index:
item = "> " + item
self.append(item)
if index is not None:
self.select(index)
def popup_event(self, event):
"override base method"
if self.stack:
return ScrolledList.popup_event(self, event)
def fill_menu(self):
"override base method"
menu = self.menu
menu.add_command(label="Go to source line",
command=self.goto_source_line)
menu.add_command(label="Show stack frame",
command=self.show_stack_frame)
def on_select(self, index):
"override base method"
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def on_double(self, index):
"override base method"
self.show_source(index)
def goto_source_line(self):
index = self.listbox.index("active")
self.show_source(index)
def show_stack_frame(self):
index = self.listbox.index("active")
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def show_source(self, index):
if not (0 <= index < len(self.stack)):
return
frame, lineno = self.stack[index]
code = frame.f_code
filename = code.co_filename
if os.path.isfile(filename):
edit = self.flist.open(filename)
if edit:
edit.gotoline(lineno)
class NamespaceViewer:
def __init__(self, master, title, dict=None):
width = 0
height = 40
if dict:
height = 20*len(dict) # XXX 20 == observed height of Entry widget
self.master = master
self.title = title
import reprlib
self.repr = reprlib.Repr()
self.repr.maxstring = 60
self.repr.maxother = 60
self.frame = frame = Frame(master)
self.frame.pack(expand=1, fill="both")
self.label = Label(frame, text=title, borderwidth=2, relief="groove")
self.label.pack(fill="x")
self.vbar = vbar = Scrollbar(frame, name="vbar")
vbar.pack(side="right", fill="y")
self.canvas = canvas = Canvas(frame,
height=min(300, max(40, height)),
scrollregion=(0, 0, width, height))
canvas.pack(side="left", fill="both", expand=1)
vbar["command"] = canvas.yview
canvas["yscrollcommand"] = vbar.set
self.subframe = subframe = Frame(canvas)
self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw")
self.load_dict(dict)
    dict = -1  # class-level sentinel: guarantees the first load_dict() call never matches self.dict
def load_dict(self, dict, force=0, rpc_client=None):
if dict is self.dict and not force:
return
subframe = self.subframe
frame = self.frame
for c in list(subframe.children.values()):
c.destroy()
self.dict = None
if not dict:
l = Label(subframe, text="None")
l.grid(row=0, column=0)
else:
#names = sorted(dict)
###
# Because of (temporary) limitations on the dict_keys type (not yet
            # public or pickleable), have the subprocess send a list of
            # keys, not a dict_keys object.  sorted() will take a dict_keys
# (no subprocess) or a list.
#
# There is also an obscure bug in sorted(dict) where the
# interpreter gets into a loop requesting non-existing dict[0],
# dict[1], dict[2], etc from the RemoteDebugger.DictProxy.
###
keys_list = dict.keys()
names = sorted(keys_list)
###
row = 0
for name in names:
value = dict[name]
svalue = self.repr.repr(value) # repr(value)
# Strip extra quotes caused by calling repr on the (already)
# repr'd value sent across the RPC interface:
if rpc_client:
svalue = svalue[1:-1]
l = Label(subframe, text=name)
l.grid(row=row, column=0, sticky="nw")
l = Entry(subframe, width=0, borderwidth=0)
l.insert(0, svalue)
l.grid(row=row, column=1, sticky="nw")
row = row+1
self.dict = dict
# XXX Could we use a <Configure> callback for the following?
subframe.update_idletasks() # Alas!
width = subframe.winfo_reqwidth()
height = subframe.winfo_reqheight()
canvas = self.canvas
self.canvas["scrollregion"] = (0, 0, width, height)
if height > 300:
canvas["height"] = 300
frame.pack(expand=1)
else:
canvas["height"] = height
frame.pack(expand=0)
def close(self):
self.frame.destroy()
|
HBEE/odoo-addons
|
refs/heads/8.0
|
inter_company_rules/models/res_config.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
class inter_company_rules_configuration(models.TransientModel):
_inherit = 'base.config.settings'
company_id = fields.Many2one('res.company', string='Select Company',
help='Select company to setup Inter company rules.')
rule_type = fields.Selection([('so_and_po', 'SO and PO setting for inter company'),
('invoice_and_refunds', 'Create Invoice/Refunds when encoding invoice/refunds')],
help='Select the type to setup inter company rules in selected company.')
so_from_po = fields.Boolean(string='Create Sale Orders when buying to this company',
help='Generate a Sale Order when a Purchase Order with this company as supplier is created.')
po_from_so = fields.Boolean(string='Create Purchase Orders when selling to this company',
help='Generate a Purchase Order when a Sale Order with this company as customer is created.')
auto_validation = fields.Boolean(string='Sale/Purchase Orders Auto Validation',
        help='''When a Sale Order or a Purchase Order is created by a multi
        company rule for this company, it will be validated automatically.''')
warehouse_id = fields.Many2one('stock.warehouse', string='Warehouse For Purchase Orders',
help='Default value to set on Purchase Orders that will be created based on Sale Orders made to this company.')
@api.onchange('rule_type')
def onchange_rule_type(self):
if self.rule_type == 'invoice_and_refunds':
self.so_from_po = False
self.po_from_so = False
self.auto_validation = False
elif self.rule_type == 'so_and_po':
self.invoice_and_refunds = False
@api.onchange('company_id')
def onchange_company_id(self):
if self.company_id:
rule_type = False
if self.company_id.so_from_po or self.company_id.po_from_so or self.company_id.auto_validation:
rule_type = 'so_and_po'
elif self.company_id.auto_generate_invoices:
rule_type = 'invoice_and_refunds'
self.rule_type = rule_type
self.so_from_po = self.company_id.so_from_po
self.po_from_so = self.company_id.po_from_so
self.auto_validation = self.company_id.auto_validation
self.warehouse_id = self.company_id.warehouse_id.id
@api.multi
def set_inter_company_configuration(self):
if self.company_id:
vals = {
'so_from_po': self.so_from_po,
'po_from_so': self.po_from_so,
'auto_validation': self.auto_validation,
'auto_generate_invoices': True if self.rule_type == 'invoice_and_refunds' else False,
'warehouse_id': self.warehouse_id.id
}
self.company_id.write(vals)
|
Architektor/PySnip
|
refs/heads/master
|
venv/lib/python2.7/site-packages/twisted/mail/test/test_options.py
|
2
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.mail.tap}.
"""
from twisted.trial.unittest import TestCase
from twisted.python.usage import UsageError
from twisted.mail import protocols
from twisted.mail.tap import Options, makeService
from twisted.python.filepath import FilePath
from twisted.python.reflect import requireModule
from twisted.internet import endpoints, defer
if requireModule('OpenSSL') is None:
sslSkip = 'Missing OpenSSL package.'
else:
sslSkip = None
class OptionsTests(TestCase):
"""
Tests for the command line option parser used for I{twistd mail}.
"""
def setUp(self):
self.aliasFilename = self.mktemp()
aliasFile = file(self.aliasFilename, 'w')
aliasFile.write('someuser:\tdifferentuser\n')
aliasFile.close()
def testAliasesWithoutDomain(self):
"""
Test that adding an aliases(5) file before adding a domain raises a
UsageError.
"""
self.assertRaises(
UsageError,
Options().parseOptions,
['--aliases', self.aliasFilename])
def testAliases(self):
"""
Test that adding an aliases(5) file to an IAliasableDomain at least
doesn't raise an unhandled exception.
"""
Options().parseOptions([
'--maildirdbmdomain', 'example.com=example.com',
'--aliases', self.aliasFilename])
def test_barePort(self):
"""
A bare port passed to I{--pop3} results in deprecation warning in
addition to a TCP4ServerEndpoint.
"""
options = Options()
options.parseOptions(['--pop3', '8110'])
self.assertEqual(len(options['pop3']), 1)
self.assertIsInstance(
options['pop3'][0], endpoints.TCP4ServerEndpoint)
warnings = self.flushWarnings([options.opt_pop3])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Specifying plain ports and/or a certificate is deprecated since "
"Twisted 11.0; use endpoint descriptions instead.")
def _endpointTest(self, service):
"""
Use L{Options} to parse a single service configuration parameter and
verify that an endpoint of the correct type is added to the list for
that service.
"""
options = Options()
options.parseOptions(['--' + service, 'tcp:1234'])
self.assertEqual(len(options[service]), 1)
self.assertIsInstance(
options[service][0], endpoints.TCP4ServerEndpoint)
def test_endpointSMTP(self):
"""
When I{--smtp} is given a TCP endpoint description as an argument, a
TCPServerEndpoint is added to the list of SMTP endpoints.
"""
self._endpointTest('smtp')
def test_endpointPOP3(self):
"""
When I{--pop3} is given a TCP endpoint description as an argument, a
TCPServerEndpoint is added to the list of POP3 endpoints.
"""
self._endpointTest('pop3')
def test_protoDefaults(self):
"""
POP3 and SMTP each listen on a TCP4ServerEndpoint by default.
"""
options = Options()
options.parseOptions([])
self.assertEqual(len(options['pop3']), 1)
self.assertIsInstance(
options['pop3'][0], endpoints.TCP4ServerEndpoint)
self.assertEqual(len(options['smtp']), 1)
self.assertIsInstance(
options['smtp'][0], endpoints.TCP4ServerEndpoint)
def test_protoDisable(self):
"""
The I{--no-pop3} and I{--no-smtp} options disable POP3 and SMTP
respectively.
"""
options = Options()
options.parseOptions(['--no-pop3'])
self.assertEqual(options._getEndpoints(None, 'pop3'), [])
self.assertNotEqual(options._getEndpoints(None, 'smtp'), [])
options = Options()
options.parseOptions(['--no-smtp'])
self.assertNotEqual(options._getEndpoints(None, 'pop3'), [])
self.assertEqual(options._getEndpoints(None, 'smtp'), [])
def test_allProtosDisabledError(self):
"""
If all protocols are disabled, L{UsageError} is raised.
"""
options = Options()
self.assertRaises(
UsageError, options.parseOptions, (['--no-pop3', '--no-smtp']))
def test_pop3sBackwardCompatibility(self):
"""
The deprecated I{--pop3s} and I{--certificate} options set up a POP3 SSL
server.
"""
cert = FilePath(__file__).sibling("server.pem")
options = Options()
options.parseOptions(['--pop3s', '8995',
'--certificate', cert.path])
self.assertEqual(len(options['pop3']), 2)
self.assertIsInstance(
options['pop3'][0], endpoints.SSL4ServerEndpoint)
self.assertIsInstance(
options['pop3'][1], endpoints.TCP4ServerEndpoint)
warnings = self.flushWarnings([options.postOptions])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Specifying plain ports and/or a certificate is deprecated since "
"Twisted 11.0; use endpoint descriptions instead.")
if sslSkip is not None:
test_pop3sBackwardCompatibility.skip = sslSkip
def test_esmtpWithoutHostname(self):
"""
If I{--esmtp} is given without I{--hostname}, L{Options.parseOptions}
raises L{UsageError}.
"""
options = Options()
exc = self.assertRaises(UsageError, options.parseOptions, ['--esmtp'])
self.assertEqual("--esmtp requires --hostname", str(exc))
def test_auth(self):
"""
Tests that the --auth option registers a checker.
"""
options = Options()
options.parseOptions(['--auth', 'memory:admin:admin:bob:password'])
self.assertEqual(len(options['credCheckers']), 1)
checker = options['credCheckers'][0]
interfaces = checker.credentialInterfaces
registered_checkers = options.service.smtpPortal.checkers
for iface in interfaces:
self.assertEqual(checker, registered_checkers[iface])
class SpyEndpoint(object):
"""
SpyEndpoint remembers what factory it is told to listen with.
"""
listeningWith = None
def listen(self, factory):
self.listeningWith = factory
return defer.succeed(None)
class MakeServiceTests(TestCase):
"""
Tests for L{twisted.mail.tap.makeService}
"""
def _endpointServerTest(self, key, factoryClass):
"""
Configure a service with two endpoints for the protocol associated with
C{key} and verify that when the service is started a factory of type
C{factoryClass} is used to listen on each of them.
"""
cleartext = SpyEndpoint()
secure = SpyEndpoint()
config = Options()
config[key] = [cleartext, secure]
service = makeService(config)
service.privilegedStartService()
service.startService()
self.addCleanup(service.stopService)
self.assertIsInstance(cleartext.listeningWith, factoryClass)
self.assertIsInstance(secure.listeningWith, factoryClass)
def test_pop3(self):
"""
If one or more endpoints is included in the configuration passed to
L{makeService} for the C{"pop3"} key, a service for starting a POP3
server is constructed for each of them and attached to the returned
service.
"""
self._endpointServerTest("pop3", protocols.POP3Factory)
def test_smtp(self):
"""
If one or more endpoints is included in the configuration passed to
L{makeService} for the C{"smtp"} key, a service for starting an SMTP
server is constructed for each of them and attached to the returned
service.
"""
self._endpointServerTest("smtp", protocols.SMTPFactory)
|
proversity-org/edx-platform
|
refs/heads/master
|
common/djangoapps/third_party_auth/migrations/0015_samlproviderconfig_archived.py
|
17
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0014_auto_20171222_1233'),
]
operations = [
migrations.AddField(
model_name='samlproviderconfig',
name='archived',
field=models.BooleanField(default=False),
),
]
|
ryfeus/lambda-packs
|
refs/heads/master
|
Tensorflow_Pandas_Numpy/source3.6/tensorboard/plugins/beholder/visualizer.py
|
4
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from math import floor, sqrt
import numpy as np
import tensorflow as tf
from tensorboard.plugins.beholder import im_util
from tensorboard.plugins.beholder.shared_config import SECTION_HEIGHT,\
IMAGE_WIDTH, DEFAULT_CONFIG, SECTION_INFO_FILENAME
from tensorboard.plugins.beholder.file_system_tools import write_pickle
MIN_SQUARE_SIZE = 3
class Visualizer(object):
def __init__(self, logdir):
self.logdir = logdir
self.sections_over_time = deque([], DEFAULT_CONFIG['window_size'])
self.config = dict(DEFAULT_CONFIG)
self.old_config = dict(DEFAULT_CONFIG)
def _reshape_conv_array(self, array, section_height, image_width):
'''Reshape a rank 4 array to be rank 2, where each column of block_width is
a filter, and each row of block height is an input channel. For example:
[[[[ 11, 21, 31, 41],
[ 51, 61, 71, 81],
[ 91, 101, 111, 121]],
[[ 12, 22, 32, 42],
[ 52, 62, 72, 82],
[ 92, 102, 112, 122]],
[[ 13, 23, 33, 43],
[ 53, 63, 73, 83],
[ 93, 103, 113, 123]]],
[[[ 14, 24, 34, 44],
[ 54, 64, 74, 84],
[ 94, 104, 114, 124]],
[[ 15, 25, 35, 45],
[ 55, 65, 75, 85],
[ 95, 105, 115, 125]],
[[ 16, 26, 36, 46],
[ 56, 66, 76, 86],
[ 96, 106, 116, 126]]],
[[[ 17, 27, 37, 47],
[ 57, 67, 77, 87],
[ 97, 107, 117, 127]],
[[ 18, 28, 38, 48],
[ 58, 68, 78, 88],
[ 98, 108, 118, 128]],
[[ 19, 29, 39, 49],
[ 59, 69, 79, 89],
[ 99, 109, 119, 129]]]]
should be reshaped to:
[[ 11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43],
[ 14, 15, 16, 24, 25, 26, 34, 35, 36, 44, 45, 46],
[ 17, 18, 19, 27, 28, 29, 37, 38, 39, 47, 48, 49],
[ 51, 52, 53, 61, 62, 63, 71, 72, 73, 81, 82, 83],
[ 54, 55, 56, 64, 65, 66, 74, 75, 76, 84, 85, 86],
[ 57, 58, 59, 67, 68, 69, 77, 78, 79, 87, 88, 89],
[ 91, 92, 93, 101, 102, 103, 111, 112, 113, 121, 122, 123],
[ 94, 95, 96, 104, 105, 106, 114, 115, 116, 124, 125, 126],
[ 97, 98, 99, 107, 108, 109, 117, 118, 119, 127, 128, 129]]
'''
# E.g. [100, 24, 24, 10]: this shouldn't be reshaped like normal.
if array.shape[1] == array.shape[2] and array.shape[0] != array.shape[1]:
array = np.rollaxis(np.rollaxis(array, 2), 2)
block_height, block_width, in_channels = array.shape[:3]
rows = []
max_element_count = section_height * int(image_width / MIN_SQUARE_SIZE)
element_count = 0
for i in range(in_channels):
rows.append(array[:, :, i, :].reshape(block_height, -1, order='F'))
# This line should be left in this position. Gives it one extra row.
if element_count >= max_element_count and not self.config['show_all']:
break
element_count += block_height * in_channels * block_width
return np.vstack(rows)
def _reshape_irregular_array(self, array, section_height, image_width):
'''Reshapes arrays of ranks not in {1, 2, 4}
'''
section_area = section_height * image_width
flattened_array = np.ravel(array)
if not self.config['show_all']:
flattened_array = flattened_array[:int(section_area/MIN_SQUARE_SIZE)]
cell_count = np.prod(flattened_array.shape)
cell_area = section_area / cell_count
cell_side_length = max(1, floor(sqrt(cell_area)))
row_count = max(1, int(section_height / cell_side_length))
col_count = int(cell_count / row_count)
# Reshape the truncated array so that it has the same aspect ratio as
# the section.
# Truncate whatever remaining values there are that don't fit. Hopefully
# it doesn't matter that the last few (< section count) aren't there.
section = np.reshape(flattened_array[:row_count * col_count],
(row_count, col_count))
return section
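  # Worked example for _reshape_irregular_array (illustrative numbers):
  # with section_height=100, image_width=400 and 10,000 flattened cells,
  # cell_area = 4, cell_side_length = 2, row_count = 50 and col_count = 200,
  # so the resulting 50x200 section matches the section's aspect ratio.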
def _determine_image_width(self, arrays, show_all):
final_width = IMAGE_WIDTH
if show_all:
for array in arrays:
rank = len(array.shape)
if rank == 1:
width = len(array)
elif rank == 2:
width = array.shape[1]
elif rank == 4:
width = array.shape[1] * array.shape[3]
else:
width = IMAGE_WIDTH
if width > final_width:
final_width = width
return final_width
def _determine_section_height(self, array, show_all):
rank = len(array.shape)
height = SECTION_HEIGHT
if show_all:
if rank == 1:
height = SECTION_HEIGHT
if rank == 2:
height = max(SECTION_HEIGHT, array.shape[0])
elif rank == 4:
height = max(SECTION_HEIGHT, array.shape[0] * array.shape[2])
else:
height = max(SECTION_HEIGHT, np.prod(array.shape) // IMAGE_WIDTH)
return height
def _arrays_to_sections(self, arrays):
'''
input: unprocessed numpy arrays.
returns: columns of the size that they will appear in the image, not scaled
for display. That needs to wait until after variance is computed.
'''
sections = []
sections_to_resize_later = {}
show_all = self.config['show_all']
image_width = self._determine_image_width(arrays, show_all)
for array_number, array in enumerate(arrays):
rank = len(array.shape)
section_height = self._determine_section_height(array, show_all)
if rank == 1:
section = np.atleast_2d(array)
elif rank == 2:
section = array
elif rank == 4:
section = self._reshape_conv_array(array, section_height, image_width)
else:
section = self._reshape_irregular_array(array,
section_height,
image_width)
# Only calculate variance for what we have to. In some cases (biases),
# the section is larger than the array, so we don't want to calculate
# variance for the same value over and over - better to resize later.
# About a 6-7x speedup for a big network with a big variance window.
section_size = section_height * image_width
array_size = np.prod(array.shape)
if section_size > array_size:
sections.append(section)
sections_to_resize_later[array_number] = section_height
else:
sections.append(im_util.resize(section, section_height, image_width))
self.sections_over_time.append(sections)
if self.config['mode'] == 'variance':
sections = self._sections_to_variance_sections(self.sections_over_time)
for array_number, height in sections_to_resize_later.items():
sections[array_number] = im_util.resize(sections[array_number],
height,
image_width)
return sections
def _sections_to_variance_sections(self, sections_over_time):
'''Computes the variance of corresponding sections over time.
Returns:
a list of np arrays.
'''
variance_sections = []
for i in range(len(sections_over_time[0])):
time_sections = [sections[i] for sections in sections_over_time]
variance = np.var(time_sections, axis=0)
variance_sections.append(variance)
return variance_sections
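  # Example (illustrative): with window_size=2 and a section observed as
  # [[1., 3.]] and then [[3., 5.]], np.var over the time axis yields
  # [[1., 1.]].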
def _sections_to_image(self, sections):
padding_size = 5
sections = im_util.scale_sections(sections, self.config['scaling'])
final_stack = [sections[0]]
padding = np.zeros((padding_size, sections[0].shape[1]))
for section in sections[1:]:
final_stack.append(padding)
final_stack.append(section)
return np.vstack(final_stack).astype(np.uint8)
def _maybe_clear_deque(self):
'''Clears the deque if certain parts of the config have changed.'''
for config_item in ['values', 'mode', 'show_all']:
if self.config[config_item] != self.old_config[config_item]:
self.sections_over_time.clear()
break
self.old_config = self.config
window_size = self.config['window_size']
if window_size != self.sections_over_time.maxlen:
self.sections_over_time = deque(self.sections_over_time, window_size)
def _save_section_info(self, arrays, sections):
infos = []
if self.config['values'] == 'trainable_variables':
names = [x.name for x in tf.trainable_variables()]
else:
names = range(len(arrays))
for array, section, name in zip(arrays, sections, names):
info = {}
info['name'] = name
info['shape'] = str(array.shape)
info['min'] = '{:.3e}'.format(section.min())
info['mean'] = '{:.3e}'.format(section.mean())
info['max'] = '{:.3e}'.format(section.max())
info['range'] = '{:.3e}'.format(section.max() - section.min())
info['height'] = section.shape[0]
infos.append(info)
write_pickle(infos, '{}/{}'.format(self.logdir, SECTION_INFO_FILENAME))
def build_frame(self, arrays):
self._maybe_clear_deque()
arrays = arrays if isinstance(arrays, list) else [arrays]
sections = self._arrays_to_sections(arrays)
self._save_section_info(arrays, sections)
final_image = self._sections_to_image(sections)
final_image = im_util.apply_colormap(final_image, self.config['colormap'])
return final_image
def update(self, config):
self.config = config
|
webspinner/webspinner-gae-cms
|
refs/heads/master
|
gdata/tlslite/integration/POP3_TLS.py
|
271
|
"""TLS Lite + poplib."""
import socket
from poplib import POP3
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# POP TLS PORT
POP3_TLS_PORT = 995
class POP3_TLS(POP3, ClientHelper):
"""This class extends L{poplib.POP3} with TLS support."""
def __init__(self, host, port = POP3_TLS_PORT,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new POP3_TLS.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
self.sock = None
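        # Try each address returned by getaddrinfo until a socket connects;
        # if none succeeds, the last socket.error message is re-raised below.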
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
### New code below (all else copied from poplib)
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
self.sock = TLSConnection(self.sock)
self.sock.closeSocket = True
ClientHelper._handshake(self, self.sock)
###
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
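# A minimal usage sketch (hypothetical host and SRP credentials; the server
# must accept TLS on POP3_TLS_PORT for this to work):
#
#   pop = POP3_TLS('pop.example.com', username='alice', password='secret')
#   print pop.welcome
#   pop.quit()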
|
saschpe/rapport
|
refs/heads/master
|
test/unit/__init__.py
|
12133432
| |
leiferikb/bitpop
|
refs/heads/master
|
build/scripts/slave/swarming/__init__.py
|
12133432
| |
alxnov/ansible-modules-core
|
refs/heads/devel
|
source_control/__init__.py
|
12133432
| |
calfonso/ansible
|
refs/heads/devel
|
test/units/parsing/__init__.py
|
12133432
| |
yannrouillard/weboob
|
refs/heads/master
|
modules/ups/backend.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.parcel import ICapParcel
from weboob.tools.backend import BaseBackend
from .browser import UpsBrowser
__all__ = ['UpsBackend']
class UpsBackend(BaseBackend, ICapParcel):
NAME = 'ups'
DESCRIPTION = u'UPS website'
MAINTAINER = u'Romain Bignon'
EMAIL = 'romain@weboob.org'
VERSION = '0.i'
BROWSER = UpsBrowser
def get_parcel_tracking(self, id):
with self.browser:
return self.browser.get_tracking_info(id)
|
igemsoftware/SYSU-Software2013
|
refs/heads/master
|
project/Python27/Lib/site-packages/win32comext/propsys/pscon.py
|
21
|
# hand generated from propsys.h
PET_DISCRETEVALUE = 0
PET_RANGEDVALUE = 1
PET_DEFAULTVALUE = 2
PET_ENDRANGE = 3
PDTF_DEFAULT = 0
PDTF_MULTIPLEVALUES = 0x1
PDTF_ISINNATE = 0x2
PDTF_ISGROUP = 0x4
PDTF_CANGROUPBY = 0x8
PDTF_CANSTACKBY = 0x10
PDTF_ISTREEPROPERTY = 0x20
PDTF_INCLUDEINFULLTEXTQUERY = 0x40
PDTF_ISVIEWABLE = 0x80
PDTF_ISQUERYABLE = 0x100
PDTF_ISSYSTEMPROPERTY = 0x80000000
PDTF_MASK_ALL = 0x800001ff
PDVF_DEFAULT = 0
PDVF_CENTERALIGN = 0x1
PDVF_RIGHTALIGN = 0x2
PDVF_BEGINNEWGROUP = 0x4
PDVF_FILLAREA = 0x8
PDVF_SORTDESCENDING = 0x10
PDVF_SHOWONLYIFPRESENT = 0x20
PDVF_SHOWBYDEFAULT = 0x40
PDVF_SHOWINPRIMARYLIST = 0x80
PDVF_SHOWINSECONDARYLIST = 0x100
PDVF_HIDELABEL = 0x200
PDVF_HIDDEN = 0x800
PDVF_CANWRAP = 0x1000
PDVF_MASK_ALL = 0x1bff
PDDT_STRING = 0
PDDT_NUMBER = 1
PDDT_BOOLEAN = 2
PDDT_DATETIME = 3
PDDT_ENUMERATED = 4
PDGR_DISCRETE = 0
PDGR_ALPHANUMERIC = 1
PDGR_SIZE = 2
PDGR_DYNAMIC = 3
PDGR_DATE = 4
PDGR_PERCENT = 5
PDGR_ENUMERATED = 6
PDFF_DEFAULT = 0
PDFF_PREFIXNAME = 0x1
PDFF_FILENAME = 0x2
PDFF_ALWAYSKB = 0x4
PDFF_RESERVED_RIGHTTOLEFT = 0x8
PDFF_SHORTTIME = 0x10
PDFF_LONGTIME = 0x20
PDFF_HIDETIME = 0x40
PDFF_SHORTDATE = 0x80
PDFF_LONGDATE = 0x100
PDFF_HIDEDATE = 0x200
PDFF_RELATIVEDATE = 0x400
PDFF_USEEDITINVITATION = 0x800
PDFF_READONLY = 0x1000
PDFF_NOAUTOREADINGORDER = 0x2000
PDSD_GENERAL = 0
PDSD_A_Z = 1
PDSD_LOWEST_HIGHEST = 2
PDSD_SMALLEST_BIGGEST = 3
PDSD_OLDEST_NEWEST = 4
PDRDT_GENERAL = 0
PDRDT_DATE = 1
PDRDT_SIZE = 2
PDRDT_COUNT = 3
PDRDT_REVISION = 4
PDRDT_LENGTH = 5
PDRDT_DURATION = 6
PDRDT_SPEED = 7
PDRDT_RATE = 8
PDRDT_RATING = 9
PDRDT_PRIORITY = 10
PDAT_DEFAULT = 0
PDAT_FIRST = 1
PDAT_SUM = 2
PDAT_AVERAGE = 3
PDAT_DATERANGE = 4
PDAT_UNION = 5
PDAT_MAX = 6
PDAT_MIN = 7
PDCOT_NONE = 0
PDCOT_STRING = 1
PDCOT_SIZE = 2
PDCOT_DATETIME = 3
PDCOT_BOOLEAN = 4
PDCOT_NUMBER = 5
PDSIF_DEFAULT = 0
PDSIF_ININVERTEDINDEX = 0x1
PDSIF_ISCOLUMN = 0x2
PDSIF_ISCOLUMNSPARSE = 0x4
PDCIT_NONE = 0
PDCIT_ONDISK = 1
PDCIT_INMEMORY = 2
PDEF_ALL = 0
PDEF_SYSTEM = 1
PDEF_NONSYSTEM = 2
PDEF_VIEWABLE = 3
PDEF_QUERYABLE = 4
PDEF_INFULLTEXTQUERY = 5
PDEF_COLUMN = 6
PKEY_PIDSTR_MAX = 10 # will take care of any long integer value
#define GUIDSTRING_MAX (1 + 8 + 1 + 4 + 1 + 4 + 1 + 4 + 1 + 12 + 1 + 1) // "{12345678-1234-1234-1234-123456789012}"
GUIDSTRING_MAX = (1 + 8 + 1 + 4 + 1 + 4 + 1 + 4 + 1 + 12 + 1 + 1) # 38 chars in the braced GUID string plus a terminating NUL (= 39)
#define PKEYSTR_MAX (GUIDSTRING_MAX + 1 + PKEY_PIDSTR_MAX)
PKEYSTR_MAX = GUIDSTRING_MAX + 1 + PKEY_PIDSTR_MAX
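# For reference, a PROPERTYKEY rendered as a string looks like
# "{B725F130-47EF-101A-A5F1-02608C9EEBAC} 10" (System.ItemNameDisplay),
# which fits within PKEYSTR_MAX.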
|
espadrine/opera
|
refs/heads/master
|
chromium/src/third_party/python_26/Lib/site-packages/win32/Demos/dde/ddeserver.py
|
34
|
# 'Request' example added jjk 11/20/98
import win32ui
from pywin.mfc import object
import dde
class MySystemTopic(object.Object):
def __init__(self):
object.Object.__init__(self, dde.CreateServerSystemTopic())
def Exec(self, cmd):
print "System Topic asked to exec", cmd
class MyOtherTopic(object.Object):
def __init__(self, topicName):
object.Object.__init__(self, dde.CreateTopic(topicName))
def Exec(self, cmd):
print "Other Topic asked to exec", cmd
class MyRequestTopic(object.Object):
def __init__(self, topicName):
topic = dde.CreateTopic(topicName)
topic.AddItem(dde.CreateStringItem(""))
object.Object.__init__(self, topic)
def Request(self, aString):
print "Request Topic asked to compute length of:", aString
return(str(len(aString)))
server = dde.CreateServer()
server.AddTopic(MySystemTopic())
server.AddTopic(MyOtherTopic("RunAnyCommand"))
server.AddTopic(MyRequestTopic("ComputeStringLength"))
server.Create('RunAny')
while 1:
win32ui.PumpWaitingMessages(0, -1)
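# A matching client might look roughly like this (a sketch modelled on the
# companion ddeclient.py demo):
#
#   client = dde.CreateServer()
#   client.Create("TestClient")
#   conversation = dde.CreateConversation(client)
#   conversation.ConnectTo("RunAny", "ComputeStringLength")
#   print conversation.Request("hello")   # prints 5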
|
yazanobeidi/sentience
|
refs/heads/master
|
src/python/memory/dnc/util.py
|
3
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC util ops and modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def batch_invert_permutation(permutations):
"""Returns batched `tf.invert_permutation` for every row in `permutations`."""
with tf.name_scope('batch_invert_permutation', values=[permutations]):
unpacked = tf.unstack(permutations)
inverses = [tf.invert_permutation(permutation) for permutation in unpacked]
return tf.stack(inverses)
def batch_gather(values, indices):
"""Returns batched `tf.gather` for every row in the input."""
with tf.name_scope('batch_gather', values=[values, indices]):
unpacked = zip(tf.unstack(values), tf.unstack(indices))
result = [tf.gather(value, index) for value, index in unpacked]
return tf.stack(result)
def one_hot(length, index):
"""Return an nd array of given `length` filled with 0s and a 1 at `index`."""
result = np.zeros(length)
result[index] = 1
return result
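# Illustrative examples (the TF ops assume an active session or eager context):
#
#   batch_invert_permutation(tf.constant([[0, 2, 1], [2, 0, 1]]))
#   # -> [[0, 2, 1], [1, 2, 0]]
#   one_hot(4, 2)  # -> array([0., 0., 1., 0.])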
|
nectR-Tutoring/nectr
|
refs/heads/new_development
|
nectr/skills/models.py
|
1
|
from django.db import models
# Create your models here.
class Skills(models.Model):
skill = models.CharField(max_length=30)
def __str__(self):
return self.skill
|
ArcherSys/ArcherSys
|
refs/heads/master
|
Lib/encodings/idna.py
|
1
|
# This module implements the RFCs 3490 (IDNA) and 3491 (Nameprep)
import stringprep, re, codecs
from unicodedata import ucd_3_2_0 as unicodedata
# IDNA section 3.1
dots = re.compile("[\u002E\u3002\uFF0E\uFF61]")
# IDNA section 5
ace_prefix = b"xn--"
sace_prefix = "xn--"
# This assumes query strings, so AllowUnassigned is true
def nameprep(label):
# Map
newlabel = []
for c in label:
if stringprep.in_table_b1(c):
# Map to nothing
continue
newlabel.append(stringprep.map_table_b2(c))
label = "".join(newlabel)
# Normalize
label = unicodedata.normalize("NFKC", label)
# Prohibit
for c in label:
if stringprep.in_table_c12(c) or \
stringprep.in_table_c22(c) or \
stringprep.in_table_c3(c) or \
stringprep.in_table_c4(c) or \
stringprep.in_table_c5(c) or \
stringprep.in_table_c6(c) or \
stringprep.in_table_c7(c) or \
stringprep.in_table_c8(c) or \
stringprep.in_table_c9(c):
raise UnicodeError("Invalid character %r" % c)
# Check bidi
RandAL = [stringprep.in_table_d1(x) for x in label]
for c in RandAL:
if c:
# There is a RandAL char in the string. Must perform further
# tests:
# 1) The characters in section 5.8 MUST be prohibited.
# This is table C.8, which was already checked
# 2) If a string contains any RandALCat character, the string
# MUST NOT contain any LCat character.
if any(stringprep.in_table_d2(x) for x in label):
raise UnicodeError("Violation of BIDI requirement 2")
# 3) If a string contains any RandALCat character, a
# RandALCat character MUST be the first character of the
# string, and a RandALCat character MUST be the last
# character of the string.
if not RandAL[0] or not RandAL[-1]:
raise UnicodeError("Violation of BIDI requirement 3")
return label
def ToASCII(label):
try:
# Step 1: try ASCII
label = label.encode("ascii")
except UnicodeError:
pass
else:
# Skip to step 3: UseSTD3ASCIIRules is false, so
# Skip to step 8.
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
# Step 2: nameprep
label = nameprep(label)
# Step 3: UseSTD3ASCIIRules is false
# Step 4: try ASCII
try:
label = label.encode("ascii")
except UnicodeError:
pass
else:
# Skip to step 8.
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
# Step 5: Check ACE prefix
if label.startswith(sace_prefix):
raise UnicodeError("Label starts with ACE prefix")
# Step 6: Encode with PUNYCODE
label = label.encode("punycode")
# Step 7: Prepend ACE prefix
label = ace_prefix + label
# Step 8: Check size
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
def ToUnicode(label):
# Step 1: Check for ASCII
if isinstance(label, bytes):
pure_ascii = True
else:
try:
label = label.encode("ascii")
pure_ascii = True
except UnicodeError:
pure_ascii = False
if not pure_ascii:
# Step 2: Perform nameprep
label = nameprep(label)
# It doesn't say this, but apparently, it should be ASCII now
try:
label = label.encode("ascii")
except UnicodeError:
raise UnicodeError("Invalid character in IDN label")
# Step 3: Check for ACE prefix
if not label.startswith(ace_prefix):
return str(label, "ascii")
# Step 4: Remove ACE prefix
label1 = label[len(ace_prefix):]
# Step 5: Decode using PUNYCODE
result = label1.decode("punycode")
# Step 6: Apply ToASCII
label2 = ToASCII(result)
# Step 7: Compare the result of step 6 with the one of step 3
# label2 will already be in lower case.
if str(label, "ascii").lower() != str(label2, "ascii"):
raise UnicodeError("IDNA does not round-trip", label, label2)
# Step 8: return the result of step 5
return result
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
if errors != 'strict':
# IDNA is quite clear that implementations must be strict
raise UnicodeError("unsupported error handling "+errors)
if not input:
return b'', 0
try:
result = input.encode('ascii')
except UnicodeEncodeError:
pass
else:
# ASCII name: fast path
labels = result.split(b'.')
for label in labels[:-1]:
if not (0 < len(label) < 64):
raise UnicodeError("label empty or too long")
if len(labels[-1]) >= 64:
raise UnicodeError("label too long")
return result, len(input)
result = bytearray()
labels = dots.split(input)
if labels and not labels[-1]:
trailing_dot = b'.'
del labels[-1]
else:
trailing_dot = b''
for label in labels:
if result:
# Join with U+002E
result.extend(b'.')
result.extend(ToASCII(label))
return bytes(result+trailing_dot), len(input)
def decode(self, input, errors='strict'):
if errors != 'strict':
raise UnicodeError("Unsupported error handling "+errors)
if not input:
return "", 0
# IDNA allows decoding to operate on Unicode strings, too.
if not isinstance(input, bytes):
# Encode to ASCII explicitly (see #3232: bytes(input) without an encoding is wrong here)
input = input.encode("ascii")
if ace_prefix not in input:
# Fast path
try:
return input.decode('ascii'), len(input)
except UnicodeDecodeError:
pass
labels = input.split(b".")
if labels and len(labels[-1]) == 0:
trailing_dot = '.'
del labels[-1]
else:
trailing_dot = ''
result = []
for label in labels:
result.append(ToUnicode(label))
return ".".join(result)+trailing_dot, len(input)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, input, errors, final):
if errors != 'strict':
# IDNA is quite clear that implementations must be strict
raise UnicodeError("unsupported error handling "+errors)
if not input:
return (b'', 0)
labels = dots.split(input)
trailing_dot = b''
if labels:
if not labels[-1]:
trailing_dot = b'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = b'.'
result = bytearray()
size = 0
for label in labels:
if size:
# Join with U+002E
result.extend(b'.')
size += 1
result.extend(ToASCII(label))
size += len(label)
result += trailing_dot
size += len(trailing_dot)
return (bytes(result), size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, input, errors, final):
if errors != 'strict':
raise UnicodeError("Unsupported error handling "+errors)
if not input:
return ("", 0)
# IDNA allows decoding to operate on Unicode strings, too.
if isinstance(input, str):
labels = dots.split(input)
else:
# Must be ASCII string
input = str(input, "ascii")
labels = input.split(".")
trailing_dot = ''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(ToUnicode(label))
if size:
size += 1
size += len(label)
result = ".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='idna',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
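# Illustrative sanity check (assuming the codec is registered under the
# 'idna' name as above):
#   "bücher.example".encode("idna")          -> b'xn--bcher-kva.example'
#   b'xn--bcher-kva.example'.decode("idna")  -> 'bücher.example'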
|
alanch-ms/PTVS
|
refs/heads/master
|
Python/Tests/TestData/InconsistentIndentation/Program.py
|
7
|
def f():
print('hello')
print('goodbye')
|
romses/LXC-Web-Panel
|
refs/heads/master
|
lwp/views/main.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import re
import time
import socket
import subprocess
import ConfigParser
from flask import Blueprint, request, session, g, redirect, url_for, abort, render_template, flash, jsonify
import lwp
import lwp.lxclite as lxc
from lwp.utils import query_db, if_logged_in, get_bucket_token, hash_passwd, config, cgroup_ext
from lwp.views.auth import AUTH
# TODO: see if we can move this block somewhere better
try:
USE_BUCKET = config.getboolean('global', 'buckets')
BUCKET_HOST = config.get('buckets', 'buckets_host')
BUCKET_PORT = config.get('buckets', 'buckets_port')
except ConfigParser.NoOptionError:
USE_BUCKET = False
print("- Bucket feature disabled")
storage_repos = config.items('storage_repository')
# Flask module
mod = Blueprint('main', __name__)
@mod.route('/')
@mod.route('/home')
@if_logged_in()
def home():
"""
home page function
"""
listx = lxc.listx()
containers_all = []
for status in ('RUNNING', 'FROZEN', 'STOPPED'):
containers_by_status = []
for container in listx[status]:
container_info = {
'name': container,
'settings': lwp.get_container_settings(container, status),
'memusg': 0,
'bucket': get_bucket_token(container)
}
containers_by_status.append(container_info)
containers_all.append({
'status': status.lower(),
'containers': containers_by_status
})
return render_template('index.html', containers=lxc.ls(), containers_all=containers_all, dist=lwp.name_distro(),
host=socket.gethostname(), templates=lwp.get_templates_list(), storage_repos=storage_repos,
auth=AUTH)
@mod.route('/about')
@if_logged_in()
def about():
"""
about page
"""
return render_template('about.html', containers=lxc.ls(), version=lwp.check_version())
@mod.route('/<container>/edit', methods=['POST', 'GET'])
@if_logged_in()
def edit(container=None):
"""
edit containers page and actions if form post request
"""
host_memory = lwp.host_memory_usage()
cfg = lwp.get_container_settings(container)
if request.method == 'POST':
form = request.form.copy()
if form['bucket'] != get_bucket_token(container):
g.db.execute("INSERT INTO machine(machine_name, bucket_token) VALUES (?, ?)", [container, form['bucket']])
g.db.commit()
flash(u'Bucket config for %s saved' % container, 'success')
# convert booleans to the values lxc expects; if a checkbox is unchecked its value is not submitted in the POST
form['flags'] = 'up' if 'flags' in form else 'down'
form['start_auto'] = '1' if 'start_auto' in form else '0'
# if memlimit/swlimit are at the host maximum, unset the form values
if int(form['memlimit']) == host_memory['total']:
form['memlimit'] = ''
if int(form['swlimit']) == host_memory['total'] * 2:
form['swlimit'] = ''
for option in form.keys():
# if the key is supported AND is different
if option in cfg.keys() and form[option] != cfg[option]:
# validate value with regex
if re.match(cgroup_ext[option][1], form[option]):
lwp.push_config_value(cgroup_ext[option][0], form[option], container=container)
flash(cgroup_ext[option][2], 'success')
else:
flash('Cannot validate value for option {}. Unsaved!'.format(option), 'error')
# we should re-read container configuration now to be coherent with the newly saved values
cfg = lwp.get_container_settings(container)
info = lxc.info(container)
infos = {'status': info['state'], 'pid': info['pid'], 'memusg': lwp.memory_usage(container)}
# prepare a regex dict from cgroups_ext definition
regex = {}
for k, v in cgroup_ext.items():
regex[k] = v[1]
return render_template('edit.html', containers=lxc.ls(), container=container, infos=infos, settings=cfg, host_memory=host_memory, storage_repos=storage_repos, regex=regex)
@mod.route('/settings/lxc-net', methods=['POST', 'GET'])
@if_logged_in()
def lxc_net():
"""
lxc-net (/etc/default/lxc) settings page and actions if form post request
"""
if session['su'] != 'Yes':
return abort(403)
if request.method == 'POST':
if lxc.running() == []:
cfg = lwp.get_net_settings()
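# Dotted-quad IPv4 pattern: each octet is restricted to 0-255.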
ip_regex = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
form = {}
for key in ['bridge', 'address', 'netmask', 'network', 'range', 'max']:
form[key] = request.form.get(key, None)
form['use'] = request.form.get('use', None)
if form['use'] != cfg['use']:
lwp.push_net_value('USE_LXC_BRIDGE', 'true' if form['use'] else 'false')
if form['bridge'] and form['bridge'] != cfg['bridge'] and \
re.match('^[a-zA-Z0-9_-]+$', form['bridge']):
lwp.push_net_value('LXC_BRIDGE', form['bridge'])
if form['address'] and form['address'] != cfg['address'] and \
re.match('^%s$' % ip_regex, form['address']):
lwp.push_net_value('LXC_ADDR', form['address'])
if form['netmask'] and form['netmask'] != cfg['netmask'] and \
re.match('^%s$' % ip_regex, form['netmask']):
lwp.push_net_value('LXC_NETMASK', form['netmask'])
if form['network'] and form['network'] != cfg['network'] and \
re.match('^%s(?:/\d{1,2}|)$' % ip_regex, form['network']):
lwp.push_net_value('LXC_NETWORK', form['network'])
if form['range'] and form['range'] != cfg['range'] and \
re.match('^%s,%s$' % (ip_regex, ip_regex), form['range']):
lwp.push_net_value('LXC_DHCP_RANGE', form['range'])
if form['max'] and form['max'] != cfg['max'] and \
re.match('^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', form['max']):
lwp.push_net_value('LXC_DHCP_MAX', form['max'])
if lwp.net_restart() == 0:
flash(u'LXC Network settings applied successfully!', 'success')
else:
flash(u'Failed to restart LXC networking.', 'error')
else:
flash(u'Stop all containers before restarting lxc-net.', 'warning')
return render_template('lxc-net.html', containers=lxc.ls(), cfg=lwp.get_net_settings(), running=lxc.running())
@mod.route('/lwp/users', methods=['POST', 'GET'])
@if_logged_in()
def lwp_users():
"""
returns users and handles POST requests: users can be edited or added from this page.
this function uses sqlite3
"""
if session['su'] != 'Yes':
return abort(403)
if AUTH != 'database':
return abort(403, 'You are using an auth method other that database.')
try:
trash = request.args.get('trash')
except KeyError:
trash = 0
su_users = query_db("SELECT COUNT(id) as num FROM users WHERE su='Yes'", [], one=True)
if request.args.get('token') == session.get('token') and int(trash) == 1 and request.args.get('userid') and \
request.args.get('username'):
nb_users = query_db("SELECT COUNT(id) as num FROM users", [], one=True)
if nb_users['num'] > 1:
if su_users['num'] <= 1:
su_user = query_db("SELECT username FROM users WHERE su='Yes'", [], one=True)
if su_user['username'] == request.args.get('username'):
flash(u'Can\'t delete the last admin user : %s' % request.args.get('username'), 'error')
return redirect(url_for('main.lwp_users'))
g.db.execute("DELETE FROM users WHERE id=? AND username=?", [request.args.get('userid'),
request.args.get('username')])
g.db.commit()
flash(u'Deleted %s' % request.args.get('username'), 'success')
return redirect(url_for('main.lwp_users'))
flash(u'Can\'t delete the last user!', 'error')
return redirect(url_for('main.lwp_users'))
if request.method == 'POST':
users = query_db('SELECT id, name, username, su FROM users ORDER BY id ASC')
if request.form['newUser'] == 'True':
if not request.form['username'] in [user['username'] for user in users]:
if re.match('^\w+$', request.form['username']) and request.form['password1']:
if request.form['password1'] == request.form['password2']:
if request.form['name']:
if re.match('[a-z A-Z0-9]{3,32}', request.form['name']):
g.db.execute("INSERT INTO users (name, username, password) VALUES (?, ?, ?)",
[request.form['name'], request.form['username'],
hash_passwd(request.form['password1'])])
g.db.commit()
else:
flash(u'Invalid name!', 'error')
else:
g.db.execute("INSERT INTO users (username, password) VALUES (?, ?)",
[request.form['username'], hash_passwd(request.form['password1'])])
g.db.commit()
flash(u'Created %s' % request.form['username'], 'success')
else:
flash(u'No password match', 'error')
else:
flash(u'Invalid username or password!', 'error')
else:
flash(u'Username already exists!', 'error')
elif request.form['newUser'] == 'False':
if request.form['password1'] == request.form['password2']:
if re.match('[a-z A-Z0-9]{3,32}', request.form['name']):
if su_users['num'] <= 1:
su = 'Yes'
else:
try:
su = request.form['su']
except KeyError:
su = 'No'
if not request.form['name']:
g.db.execute("UPDATE users SET name='', su=? WHERE username=?", [su, request.form['username']])
g.db.commit()
elif request.form['name'] and not request.form['password1'] and not request.form['password2']:
g.db.execute("UPDATE users SET name=?, su=? WHERE username=?",
[request.form['name'], su, request.form['username']])
g.db.commit()
elif request.form['name'] and request.form['password1'] and request.form['password2']:
g.db.execute("UPDATE users SET name=?, password=?, su=? WHERE username=?",
[request.form['name'], hash_passwd(request.form['password1']), su,
request.form['username']])
g.db.commit()
elif request.form['password1'] and request.form['password2']:
g.db.execute("UPDATE users SET password=?, su=? WHERE username=?",
[hash_passwd(request.form['password1']), su, request.form['username']])
g.db.commit()
flash(u'Updated', 'success')
else:
flash(u'Invalid name!', 'error')
else:
flash(u'No password match', 'error')
else:
flash(u'Unknown error!', 'error')
users = query_db("SELECT id, name, username, su FROM users ORDER BY id ASC")
nb_users = query_db("SELECT COUNT(id) as num FROM users", [], one=True)
su_users = query_db("SELECT COUNT(id) as num FROM users WHERE su='Yes'", [], one=True)
return render_template('users.html', containers=lxc.ls(), users=users, nb_users=nb_users, su_users=su_users)
@mod.route('/lwp/tokens', methods=['POST', 'GET'])
@if_logged_in()
def lwp_tokens():
"""
returns api tokens info and handles POST requests: tokens can be shown, deleted or added from this page.
this function uses sqlite3, requires admin privilege
"""
if session['su'] != 'Yes':
return abort(403)
if request.method == 'POST':
if request.form['action'] == 'add':
# we want to add a new token
token = request.form['token']
description = request.form['description']
username = session['username'] # we should save the username due to ldap option
g.db.execute("INSERT INTO api_tokens (username, token, description) VALUES(?, ?, ?)", [username, token,
description])
g.db.commit()
flash(u'Token %s successfully added!' % token, 'success')
if request.args.get('action') == 'del':
token = request.args['token']
g.db.execute("DELETE FROM api_tokens WHERE token=?", [token])
g.db.commit()
flash(u'Token %s successfully deleted!' % token, 'success')
tokens = query_db("SELECT description, token, username FROM api_tokens ORDER BY token DESC")
return render_template('tokens.html', containers=lxc.ls(), tokens=tokens)
@mod.route('/checkconfig')
@if_logged_in()
def checkconfig():
"""
returns the display of lxc-checkconfig command
"""
if session['su'] != 'Yes':
return abort(403)
return render_template('checkconfig.html', containers=lxc.ls(), cfg=lxc.checkconfig())
@mod.route('/action', methods=['GET'])
@if_logged_in()
def action():
"""
manage all actions related to containers
lxc-start, lxc-stop, etc...
"""
act = request.args['action']
name = request.args['name']
# TODO: refactor this method, it's horrible to read
if act == 'start':
try:
if lxc.start(name) == 0:
time.sleep(1) # Fix bug : "the container is randomly not displayed in overview list after a boot"
flash(u'Container %s started successfully!' % name, 'success')
else:
flash(u'Unable to start %s!' % name, 'error')
except lxc.ContainerAlreadyRunning:
flash(u'Container %s is already running!' % name, 'error')
elif act == 'stop':
try:
if lxc.stop(name) == 0:
flash(u'Container %s stopped successfully!' % name, 'success')
else:
flash(u'Unable to stop %s!' % name, 'error')
except lxc.ContainerNotRunning:
flash(u'Container %s is already stopped!' % name, 'error')
elif act == 'freeze':
try:
if lxc.freeze(name) == 0:
flash(u'Container %s frozen successfully!' % name, 'success')
else:
flash(u'Unable to freeze %s!' % name, 'error')
except lxc.ContainerNotRunning:
flash(u'Container %s not running!' % name, 'error')
elif act == 'unfreeze':
try:
if lxc.unfreeze(name) == 0:
flash(u'Container %s unfrozen successfully!' % name, 'success')
else:
flash(u'Unable to unfreeze %s!' % name, 'error')
except lxc.ContainerNotRunning:
flash(u'Container %s not frozen!' % name, 'error')
elif act == 'destroy':
if session['su'] != 'Yes':
return abort(403)
try:
if lxc.destroy(name) == 0:
flash(u'Container %s destroyed successfully!' % name, 'success')
else:
flash(u'Unable to destroy %s!' % name, 'error')
except lxc.ContainerDoesntExists:
flash(u'The Container %s does not exist!' % name, 'error')
elif act == 'reboot' and name == 'host':
if session['su'] != 'Yes':
return abort(403)
msg = '\v*** LXC Web Panel *** \
\nReboot from web panel'
try:
subprocess.check_call('/sbin/shutdown -r now \'%s\'' % msg, shell=True)
flash(u'System will now restart!', 'success')
except subprocess.CalledProcessError:
flash(u'System error!', 'error')
elif act == 'push':
# TODO: implement push action
pass
try:
if request.args['from'] == 'edit':
return redirect(url_for('main.edit', container=name))
else:
return redirect(url_for('main.home'))
except KeyError:
return redirect(url_for('main.home'))
@mod.route('/action/create-container', methods=['GET', 'POST'])
@if_logged_in()
def create_container():
"""
verify all forms to create a container
"""
if session['su'] != 'Yes':
return abort(403)
if request.method == 'POST':
name = request.form['name']
template = request.form['template']
command = request.form['command']
if re.match('^(?!containers$)[a-zA-Z0-9_-]+$', name):
storage_method = request.form['backingstore']
if storage_method == 'default':
try:
if lxc.create(name, template=template, xargs=command) == 0:
flash(u'Container %s created successfully!' % name, 'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The Container %s is already created!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Error! %s' % name, 'error')
elif storage_method == 'directory':
directory = request.form['dir']
if re.match('^/[a-zA-Z0-9_/-]+$', directory) and directory != '':
try:
if lxc.create(name, template=template, storage='dir --dir %s' % directory, xargs=command) == 0:
flash(u'Container %s created successfully!' % name, 'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The Container %s is already created!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Error! %s' % name, 'error')
elif storage_method == 'btrfs':
try:
if lxc.create(name, template=template, storage='btrfs', xargs=command) == 0:
flash(u'Container %s created successfully!' % name, 'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The Container %s is already created!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Error! %s' % name, 'error')
elif storage_method == 'zfs':
zfs = request.form['zpoolname']
if re.match('^[a-zA-Z0-9_-]+$', zfs) and zfs != '':
try:
if lxc.create(name, template=template, storage='zfs --zfsroot %s' % zfs, xargs=command) == 0:
flash(u'Container %s created successfully!' % name, 'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The Container %s is already created!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Error! %s' % name, 'error')
elif storage_method == 'lvm':
lvname = request.form['lvname']
vgname = request.form['vgname']
fstype = request.form['fstype']
fssize = request.form['fssize']
storage_options = 'lvm'
if re.match('^[a-zA-Z0-9_-]+$', lvname) and lvname != '':
storage_options += ' --lvname %s' % lvname
if re.match('^[a-zA-Z0-9_-]+$', vgname) and vgname != '':
storage_options += ' --vgname %s' % vgname
if re.match('^[a-z0-9]+$', fstype) and fstype != '':
storage_options += ' --fstype %s' % fstype
if re.match('^[0-9]+[GM]$', fssize) and fssize != '':
storage_options += ' --fssize %s' % fssize
try:
if lxc.create(name, template=template, storage=storage_options, xargs=command) == 0:
flash(u'Container %s created successfully!' % name, 'success')
else:
flash(u'Failed to create %s!' % name, 'error')
except lxc.ContainerAlreadyExists:
flash(u'The container/logical volume %s is already created!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Error! %s' % name, 'error')
else:
flash(u'Missing parameters to create container!', 'error')
else:
if name == '':
flash(u'Please enter a container name!', 'error')
else:
flash(u'Invalid name for \"%s\"!' % name, 'error')
return redirect(url_for('main.home'))
@mod.route('/action/clone-container', methods=['GET', 'POST'])
@if_logged_in()
def clone_container():
"""
verify all forms to clone a container
"""
if session['su'] != 'Yes':
return abort(403)
if request.method == 'POST':
orig = request.form['orig']
name = request.form['name']
try:
snapshot = request.form['snapshot']
if snapshot == 'True':
snapshot = True
except KeyError:
snapshot = False
if re.match('^(?!containers$)[a-zA-Z0-9_-]+$', name):
out = None
try:
out = lxc.clone(orig=orig, new=name, snapshot=snapshot)
except lxc.ContainerAlreadyExists:
flash(u'The Container %s already exists!' % name, 'error')
except subprocess.CalledProcessError:
flash(u'Can\'t snapshot a directory', 'error')
if out and out == 0:
flash(u'Container %s cloned into %s successfully!' % (orig, name), 'success')
elif out and out != 0:
flash(u'Failed to clone %s into %s!' % (orig, name), 'error')
else:
if name == '':
flash(u'Please enter a container name!', 'error')
else:
flash(u'Invalid name for \"%s\"!' % name, 'error')
return redirect(url_for('main.home'))
@mod.route('/action/backup-container', methods=['GET', 'POST'])
@if_logged_in()
def backup_container():
"""
Verify the form to backup a container
"""
if request.method == 'POST':
container = request.form['orig']
sr_type = request.form['dest']
if 'push' in request.form:
push = request.form['push']
else:
push = False
sr_path = None
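# Look up the filesystem path configured for the selected storage repository type.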
for sr in storage_repos:
if sr_type in sr:
sr_path = sr[1]
break
out = None
try:
backup_file = lxc.backup(container=container, sr_type=sr_type, destination=sr_path)
out = 0  # lxc.backup raises on failure, so reaching this line means the backup succeeded
bucket_token = get_bucket_token(container)
if push and bucket_token and USE_BUCKET:
os.system('curl http://{}:{}/{} -F file=@{}'.format(BUCKET_HOST, BUCKET_PORT, bucket_token, backup_file))
except lxc.ContainerDoesntExists:
flash(u'The Container %s does not exist!' % container, 'error')
except lxc.DirectoryDoesntExists:
flash(u'Local backup directory "%s" does not exist!' % sr_path, 'error')
except lxc.NFSDirectoryNotMounted:
flash(u'NFS repository "%s" not mounted!' % sr_path, 'error')
except subprocess.CalledProcessError:
flash(u'Error during transfer!', 'error')
except Exception:
flash(u'Error during transfer!', 'error')
if out == 0:
flash(u'Container %s backed up successfully' % container, 'success')
elif out != 0:
flash(u'Failed to backup %s container' % container, 'error')
return redirect(url_for('main.home'))
@mod.route('/_refresh_info')
@if_logged_in()
def refresh_info():
return jsonify({'cpu': lwp.host_cpu_percent(),
'uptime': lwp.host_uptime(),
'disk': lwp.host_disk_usage()})
@mod.route('/_refresh_memory_<name>')
@if_logged_in()
def refresh_memory_containers(name=None):
if name == 'containers':
containers_running = lxc.running()
containers = []
for container in containers_running:
container = container.replace(' (auto)', '')
containers.append({'name': container, 'memusg': lwp.memory_usage(container),
'settings': lwp.get_container_settings(container)})
return jsonify(data=containers)
elif name == 'host':
return jsonify(lwp.host_memory_usage())
return jsonify({'memusg': lwp.memory_usage(name)})
@mod.route('/_check_version')
@if_logged_in()
def check_version():
return jsonify(lwp.check_version())
|
FrodeSolheim/fs-uae-launcher
|
refs/heads/master
|
workspace/apps/__init__.py
|
12133432
| |
CatoTH/OpenSlides
|
refs/heads/master
|
server/openslides/chat/views.py
|
5
|
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from rest_framework.utils.serializer_helpers import ReturnDict
from openslides.utils.auth import has_perm
from openslides.utils.autoupdate import (
disable_history,
inform_changed_data,
inform_deleted_data,
)
from openslides.utils.rest_api import (
CreateModelMixin,
DestroyModelMixin,
GenericViewSet,
ModelViewSet,
Response,
action,
status,
)
from .models import ChatGroup, ChatMessage
ENABLE_CHAT = getattr(settings, "ENABLE_CHAT", False)
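# Chat is opt-in: every permission check below also requires this flag.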
class ChatGroupViewSet(ModelViewSet):
"""
API endpoint for chat groups.
There are the following views: metadata, list, retrieve, create,
partial_update, update, destroy and clear.
"""
queryset = ChatGroup.objects.all()
def check_view_permissions(self):
"""
Returns True if the user has required permissions.
"""
if self.action in ("list", "retrieve"):
result = True
else:
result = has_perm(self.request.user, "chat.can_manage")
return result and ENABLE_CHAT
def update(self, *args, **kwargs):
response = super().update(*args, **kwargs)
# Update all affected chatmessages to update their `read_groups_id` and
# `write_groups_id` field, which is taken from the updated chatgroup.
inform_changed_data(ChatMessage.objects.filter(chatgroup=self.get_object()))
return response
@action(detail=True, methods=["POST"])
def clear(self, request, *args, **kwargs):
"""
Deletes all chat messages of the group.
"""
messages = self.get_object().messages.all()
messages_id = [message.id for message in messages]
messages.delete()
collection = ChatMessage.get_collection_string()
inform_deleted_data((collection, id) for id in messages_id)
return Response()
class ChatMessageViewSet(
CreateModelMixin,
DestroyModelMixin,
GenericViewSet,
):
"""
API endpoint for chat messages.
There are the following views: metadata, create and destroy.
"""
queryset = ChatMessage.objects.all()
def check_view_permissions(self):
# The permissions are checked in the view.
return ENABLE_CHAT and not isinstance(self.request.user, AnonymousUser)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
if not serializer.validated_data["chatgroup"].can_write(self.request.user):
self.permission_denied(self.request)
# Do not use the serializer.save since it will put the model in the history.
validated_data = {
**serializer.validated_data,
"username": self.request.user.short_name(),
"user_id": self.request.user.id,
}
chatmessage = ChatMessage(**validated_data)
chatmessage.save(disable_history=True)
return Response(
ReturnDict(id=chatmessage.id, serializer=serializer),
status=status.HTTP_201_CREATED,
)
def destroy(self, request, *args, **kwargs):
if (
not has_perm(self.request.user, "chat.can_manage")
and self.get_object().user_id != self.request.user.id
):
self.permission_denied(request)
disable_history()
return super().destroy(request, *args, **kwargs)
|
flavour/tldrmp
|
refs/heads/master
|
modules/tests/volunteer/__init__.py
|
28
|
from volunteer import *
from create_volunteer_job_title import *
from create_volunteer_programme import *
from create_volunteer_skill import *
from create_volunteer_certificate import *
from create_volunteer import *
from create_volunteer_training import *
from volunteer_search import *
from export_volunteers import *
|
razvanphp/arangodb
|
refs/heads/devel
|
3rdParty/V8-3.31.74.1/third_party/python_26/Lib/encodings/iso8859_10.py
|
593
|
""" Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
u'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
u'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
u'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
u'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
u'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
u'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
u'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
u'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
u'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
u'\u2015' # 0xBD -> HORIZONTAL BAR
u'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
18098924759/Wox
|
refs/heads/master
|
PythonHome/Lib/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py
|
2800
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
# NOTE: CP949 is a superset of EUC-KR, so the distribution should not
# be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
ZeitOnline/zeit.content.cp
|
refs/heads/master
|
src/zeit/content/cp/tests/__init__.py
|
9480
|
#
|
olegvg/telebot
|
refs/heads/master
|
telebot/storage/__init__.py
|
1
|
# -*- coding: utf-8 -*-
import abc
import logging
from telebot import config, utils
logger = logging.getLogger('storage')
class StorageMixin(object):
__metaclass__ = utils.AbstractSingletonMeta
@abc.abstractmethod
def get_by_key(self, key, default=None):
pass
@abc.abstractmethod
def set_by_key(self, key, value):
pass
@abc.abstractmethod
def del_by_key(self, key):
pass
@abc.abstractmethod
def is_key_exists(self, key):
pass
@abc.abstractmethod
def clear(self):
pass
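# Placeholder backend used until register_storage() swaps in the configured plugins.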
class StorageStub(object):
def get_by_key(self, key, default=None):
raise NotImplementedError
def set_by_key(self, key, value):
raise NotImplementedError
def del_by_key(self, key):
raise NotImplementedError
def is_key_exists(self, key):
raise NotImplementedError
def clear(self):
raise NotImplementedError
def register_storage():
ephemeral_storage_plugin = config.get_config(
lambda x: x['telebot']['ephemeral_storage_plugin'],
'local_storage'
)
persistent_storage_plugin = config.get_config(
lambda x: x['telebot']['persistent_storage_plugin'],
'local_storage'
)
global EphemeralStorage, PersistentStorage
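# Storage backends are plain modules resolved by name at runtime; level=-1
# keeps Python 2's default relative-then-absolute import semantics.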
logger.debug("Storage plugin '{}' is going to be registered as ephemeral".format(ephemeral_storage_plugin))
_ephemeral_module = __import__(ephemeral_storage_plugin, globals(), locals(), [], -1)
EphemeralStorage = _ephemeral_module.EphemeralStorage
logger.debug("Storage plugin '{}' is going to be registered as persistent".format(persistent_storage_plugin))
_persistent_module = __import__(persistent_storage_plugin, globals(), locals(), [], -1)
PersistentStorage = _persistent_module.PersistentStorage
EphemeralStorage = StorageStub()
PersistentStorage = StorageStub()
|
kmod/icbd
|
refs/heads/master
|
icbd/type_analyzer/tests/imports.py
|
1
|
import basic # 7 module 'basic'
a = basic.a # 0 int
import sys # 7 module 'sys'
sys # 0 module 'sys'
import os.path
os # 0 module 'os'
p = path # 0 <unknown> # 4 <unknown> # e 4
from os import path
path # 0 module 'path'
import os.doesnt_exist # e 0
from foo import bar # e 0 # 5 <unknown> # 16 <unknown>
bar # 0 <unknown>
from re import search, match # 15 (str,str,int?) -> Match # 23 (str,str,int?) -> Match
import os.path as a
a # 0 module 'path'
from . import control_flow # e 0
|
ivanalejandro0/RequireType
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
alihanniba/pythonDemo
|
refs/heads/master
|
qqSpider/dynamic_spider.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import re
import json
head = {'User-Agent': \
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36'}
start_url = 'http://v.qq.com/cover/n/nvblqd32r7lr8x6.html?vid=x0018h9wuat'
commet_url = 'http://sns.video.qq.com/fcgi-bin/video_comment_id?otype=json&low_login=1&op=3&vid='
last_url = 'http://coral.qq.com/article/jjjj/comment?commentid=6077082562022945024&reqnum=20'
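# 'jjjj' is a placeholder that is replaced with the real comment_id below.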
# Fetch and parse the first URL
jscontent = requests.get(start_url,headers = head).content
# Extract the vid
vid = re.findall('vid:"(.*?)"',jscontent,re.S)
# Build the second URL
middle_url = str(commet_url) + vid[0]
# Fetch and parse the second URL
twocontent = requests.get(middle_url,headers = head).content
# Extract the comment_id
comment_id = re.findall('"comment_id":"(.*?)","result"',twocontent,re.S)
# Substitute the placeholder to build the final request URL
request_url = last_url.replace('jjjj',comment_id[0])
def parseJson(response):
# Request the final URL and parse the JSON string
comments = requests.get(response,headers = head).content
jsonMain = json.loads(comments)
data = jsonMain['data']
last = data['last']
print 'last:' +' ' + last
commentid = data['commentid']
for each in commentid:
print each['userinfo']['nick'] + ' ' + each['content']
print '----------------------------------------------------------------------'
# Find the commentid in the URL and replace it with last to page forward
commentid = re.findall('[1-9][0-9]{4,}',response,re.S)
new_url = response.replace(commentid[1],last)
if new_url == response:
print "No more comments"
else:
parseJson(new_url)
# print commentid[1]
parseJson(request_url)
|
simonjbeaumont/sm
|
refs/heads/master
|
tests/lvhd_test/logger.py
|
12
|
#!/usr/bin/python
logger = None
|
olatoft/reverse-hangman
|
refs/heads/master
|
lib/python3.5/site-packages/pip/cmdoptions.py
|
136
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
from functools import partial
from optparse import OptionGroup, SUPPRESS_HELP, Option
import warnings
from pip.index import (
FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
fmt_ctl_no_use_wheel)
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, src_prefix
from pip.utils.hashes import STRONG_HASHES
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
def resolve_wheel_no_use_binary(options):
if not options.use_wheel:
control = options.format_control
fmt_ctl_no_use_wheel(control)
def check_install_build_global(options, check_options=None):
"""Disable wheels if per-setup.py call options are set.
:param options: The OptionParser options to update.
:param check_options: The options to check, if not supplied defaults to
options.
"""
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
fmt_ctl_no_binary(control)
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2)
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = partial(
Option,
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
def exists_action():
return Option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.")
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help='Base URL of Python Package Index (default %default).')
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.'
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
def find_links():
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory, "
"then look for archives in the directory listing.")
def allow_external():
return Option(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
allow_all_external = partial(
Option,
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help=SUPPRESS_HELP,
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
# Remove after 7.0
no_allow_external = partial(
Option,
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
def allow_unsafe():
return Option(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
# Remove after 7.0
no_allow_unsafe = partial(
Option,
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
def constraints():
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.')
def requirements():
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
def editable():
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
# XXX: deprecated, remove in 9.0
use_wheel = partial(
Option,
'--use-wheel',
dest='use_wheel',
action='store_true',
default=True,
help=SUPPRESS_HELP,
)
# XXX: deprecated, remove in 9.0
no_use_wheel = partial(
Option,
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations. DEPRECATED in favour of --no-binary.'),
)
def _get_format_control(values, option):
"""Get a format_control object."""
return getattr(values, option.dest)
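# optparse callbacks: each --no-binary / --only-binary value moves package
# names between the two mutually exclusive sets on the shared FormatControl.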
def _handle_no_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.no_binary, existing.only_binary)
def _handle_only_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.only_binary, existing.no_binary)
def no_binary():
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.")
def only_binary():
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.")
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
)
def _merge_hash(option, opt_str, value, parser):
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(':', 1)
except ValueError:
parser.error('Arguments to %s must be a hash name '
'followed by a value, like --hash=sha256:abcde...' %
opt_str)
if algo not in STRONG_HASHES:
parser.error('Allowed hash algorithms for %s are %s.' %
(opt_str, ', '.join(STRONG_HASHES)))
parser.values.hashes.setdefault(algo, []).append(digest)
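# Parsing sketch: two --hash options on the same requirement, e.g.
#   --hash=sha256:abc --hash=sha256:def
# leave parser.values.hashes == {'sha256': ['abc', 'def']}; a value without a
# ':' or naming an algorithm outside STRONG_HASHES aborts via parser.error().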
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...')
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.')
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
non_deprecated_index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
process_dependency_links,
]
}
index_group = {
'name': 'Package Index Options (including deprecated options)',
'options': non_deprecated_index_group['options'] + [
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
]
}
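# Usage sketch (assumption, mirroring how pip commands consume these dicts):
#   group = optparse.OptionGroup(parser, general_group['name'])
#   for option in general_group['options']:
#       group.add_option(option())
#   parser.add_option_group(group)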
|
pnichols104/python-koans
|
refs/heads/master
|
python3/runner/mountain.py
|
127
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
from . import path_to_enlightenment
from .sensei import Sensei
from .writeln_decorator import WritelnDecorator
class Mountain:
def __init__(self):
self.stream = WritelnDecorator(sys.stdout)
self.tests = path_to_enlightenment.koans()
self.lesson = Sensei(self.stream)
def walk_the_path(self, args=None):
"Run the koans tests with a custom runner output."
        if args and len(args) >= 2:
self.tests = unittest.TestLoader().loadTestsFromName("koans." + args[1])
self.tests(self.lesson)
self.lesson.learn()
return self.lesson
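# Usage sketch (not part of the original file; 'about_asserts' is a
# hypothetical koan module):
#   Mountain().walk_the_path(['runner', 'about_asserts'])
# loads only koans.about_asserts instead of the full path to enlightenment.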
|
ngoix/OCRF
|
refs/heads/master
|
doc/datasets/rcv1_fixture.py
|
238
|
"""Fixture module to skip the datasets loading when offline
The RCV1 data is rather large and some CI workers such as travis are
stateless hence will not cache the dataset as regular sklearn users would do.
The following will skip the execution of the rcv1.rst doctests
if the proper environment variable is configured (see the source code of
check_skip_network for more details).
"""
from sklearn.utils.testing import check_skip_network, SkipTest
import os
from sklearn.datasets import get_data_home
def setup_module():
check_skip_network()
# skip the test in rcv1.rst if the dataset is not already loaded
rcv1_dir = os.path.join(get_data_home(), "RCV1")
if not os.path.exists(rcv1_dir):
raise SkipTest("Download RCV1 dataset to run this test.")
|
yury-s/v8-inspector
|
refs/heads/master
|
Source/chrome/tools/telemetry/telemetry/value/skip.py
|
8
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import value as value_module
class SkipValue(value_module.Value):
def __init__(self, page, reason, description=None):
"""A value representing a skipped page.
Args:
page: The skipped page object.
reason: The string reason the page was skipped.
"""
super(SkipValue, self).__init__(page, 'skip', '', True, description, None)
self._reason = reason
def __repr__(self):
page_name = self.page.url
return 'SkipValue(%s, %s)' % (page_name, self._reason)
@property
def reason(self):
return self._reason
def GetBuildbotDataType(self, output_context):
return None
def GetBuildbotValue(self):
return None
def GetChartAndTraceNameForPerPageResult(self):
return None
def GetRepresentativeNumber(self):
return None
def GetRepresentativeString(self):
return None
@staticmethod
def GetJSONTypeName():
return 'skip'
def AsDict(self):
d = super(SkipValue, self).AsDict()
d['reason'] = self._reason
return d
@staticmethod
def FromDict(value_dict, page_dict):
kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
del kwargs['name']
del kwargs['units']
if 'important' in kwargs:
del kwargs['important']
kwargs['reason'] = value_dict['reason']
if 'tir_label' in kwargs:
del kwargs['tir_label']
return SkipValue(**kwargs)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
assert False, 'Should not be called.'
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values,
group_by_name_suffix=False):
assert False, 'Should not be called.'
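# Construction sketch (assumption): a page runner would record a skipped page
# with something like
#   results.AddValue(SkipValue(page, 'page requires manual login'))
# and AsDict()/FromDict() round-trip the reason through the JSON results.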
|
mafiya69/sympy
|
refs/heads/master
|
sympy/matrices/expressions/determinant.py
|
92
|
from __future__ import print_function, division
from sympy import Basic, Expr, S, sympify
from .matexpr import ShapeError
class Determinant(Expr):
"""Matrix Determinant
Represents the determinant of a matrix expression.
>>> from sympy import MatrixSymbol, Determinant, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> Determinant(A)
Determinant(A)
>>> Determinant(eye(3)).doit()
1
"""
def __new__(cls, mat):
mat = sympify(mat)
if not mat.is_Matrix:
raise TypeError("Input to Determinant, %s, not a matrix" % str(mat))
if not mat.is_square:
raise ShapeError("Det of a non-square matrix")
return Basic.__new__(cls, mat)
@property
def arg(self):
return self.args[0]
def doit(self, expand=False):
try:
return self.arg._eval_determinant()
except (AttributeError, NotImplementedError):
return self
def det(matexpr):
""" Matrix Determinant
>>> from sympy import MatrixSymbol, det, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> det(A)
Determinant(A)
>>> det(eye(3))
1
"""
return Determinant(matexpr).doit()
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_Determinant(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine, det
>>> X = MatrixSymbol('X', 2, 2)
>>> det(X)
Determinant(X)
>>> with assuming(Q.orthogonal(X)):
... print(refine(det(X)))
1
"""
if ask(Q.orthogonal(expr.arg), assumptions):
return S.One
elif ask(Q.singular(expr.arg), assumptions):
return S.Zero
elif ask(Q.unit_triangular(expr.arg), assumptions):
return S.One
return expr
handlers_dict['Determinant'] = refine_Determinant
|
ubic135/odoo-design
|
refs/heads/master
|
addons/web_view_editor/__openerp__.py
|
423
|
{
'name': 'View Editor',
'category': 'Hidden',
'description': """
OpenERP Web to edit views.
==========================
""",
'version': '2.0',
'depends':['web'],
'data' : [
'views/web_view_editor.xml',
],
'qweb': ['static/src/xml/view_editor.xml'],
'auto_install': True,
}
|
meawoppl/babyfood
|
refs/heads/master
|
babyfood/pcb/__init__.py
|
12133432
| |
ShiYw/Sigil
|
refs/heads/master
|
3rdparty/python/Lib/test/test_curses.py
|
10
|
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call (nearly) every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import os
import sys
import tempfile
import unittest
from test.support import requires, import_module, verbose
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import inspect
requires('curses')
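# Invocation sketch (assumption): enabling the 'curses' resource when running
# this file through regrtest, e.g.
#   python -m test -u curses test_curses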
# If either of these don't exist, skip the tests.
curses = import_module('curses')
curses.panel = import_module('curses.panel')
term = os.environ.get('TERM', 'unknown')
@unittest.skipUnless(sys.__stdout__.isatty(), 'sys.__stdout__ is not a tty')
@unittest.skipIf(term == 'unknown',
"$TERM=%r, calling initscr() may cause exit" % term)
@unittest.skipIf(sys.platform == "cygwin",
"cygwin's curses mostly just hangs")
class TestCurses(unittest.TestCase):
@classmethod
def setUpClass(cls):
curses.setupterm(fd=sys.__stdout__.fileno())
def setUp(self):
if verbose:
# just to make the test output a little more readable
print()
self.stdscr = curses.initscr()
curses.savetty()
def tearDown(self):
curses.resetty()
curses.endwin()
def test_window_funcs(self):
"Test the methods of windows"
stdscr = self.stdscr
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
with self.assertRaises(TypeError,
msg="Expected win.border() to raise TypeError"):
win.border(65, 66, 67, 68,
69, [], 71, 72)
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 2, 1, 3, 3)
win2.overwrite(win, 1, 2, 2, 1, 3, 3)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def test_module_funcs(self):
"Test module-level functions"
stdscr = self.stdscr
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
f = tempfile.TemporaryFile()
stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
f.close()
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp(b'abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm(b'cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
if curses.has_colors():
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'keyname'):
curses.keyname(13)
if hasattr(curses, 'has_key'):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
# availmask indicates that mouse stuff not available.
if availmask != 0:
curses.mouseinterval(10)
# just verify these don't cause errors
curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
m = curses.getmouse()
if hasattr(curses, 'is_term_resized'):
curses.is_term_resized(*stdscr.getmaxyx())
if hasattr(curses, 'resizeterm'):
curses.resizeterm(*stdscr.getmaxyx())
if hasattr(curses, 'resize_term'):
curses.resize_term(*stdscr.getmaxyx())
def test_unctrl(self):
from curses import ascii
for ch, expected in [('a', 'a'), ('A', 'A'),
(';', ';'), (' ', ' '),
('\x7f', '^?'), ('\n', '^J'), ('\0', '^@'),
# Meta-bit characters
('\x8a', '!^J'), ('\xc1', '!A'),
]:
self.assertEqual(ascii.unctrl(ch), expected,
'curses.unctrl fails on character %r' % ch)
def test_userptr_without_set(self):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
with self.assertRaises(curses.panel.error,
msg='userptr should fail since not set'):
p.userptr()
def test_userptr_memory_leak(self):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
obj = object()
nrefs = sys.getrefcount(obj)
for i in range(100):
p.set_userptr(obj)
p.set_userptr(None)
self.assertEqual(sys.getrefcount(obj), nrefs,
"set_userptr leaked references")
def test_userptr_segfault(self):
panel = curses.panel.new_panel(self.stdscr)
class A:
def __del__(self):
panel.set_userptr(None)
panel.set_userptr(A())
panel.set_userptr(None)
@unittest.skipUnless(hasattr(curses, 'resizeterm'),
'resizeterm not available')
def test_resize_term(self):
lines, cols = curses.LINES, curses.COLS
new_lines = lines - 1
new_cols = cols + 1
curses.resizeterm(new_lines, new_cols)
self.assertEqual(curses.LINES, new_lines)
self.assertEqual(curses.COLS, new_cols)
def test_issue6243(self):
curses.ungetch(1025)
self.stdscr.getkey()
@unittest.skipUnless(hasattr(curses, 'unget_wch'),
'unget_wch not available')
def test_unget_wch(self):
stdscr = self.stdscr
encoding = stdscr.encoding
for ch in ('a', '\xe9', '\u20ac', '\U0010FFFF'):
try:
ch.encode(encoding)
except UnicodeEncodeError:
continue
try:
curses.unget_wch(ch)
except Exception as err:
self.fail("unget_wch(%a) failed with encoding %s: %s"
% (ch, stdscr.encoding, err))
read = stdscr.get_wch()
self.assertEqual(read, ch)
code = ord(ch)
curses.unget_wch(code)
read = stdscr.get_wch()
self.assertEqual(read, ch)
def test_issue10570(self):
b = curses.tparm(curses.tigetstr("cup"), 5, 3)
self.assertIs(type(b), bytes)
curses.putp(b)
def test_encoding(self):
stdscr = self.stdscr
import codecs
encoding = stdscr.encoding
codecs.lookup(encoding)
with self.assertRaises(TypeError):
stdscr.encoding = 10
stdscr.encoding = encoding
with self.assertRaises(TypeError):
del stdscr.encoding
def test_issue21088(self):
stdscr = self.stdscr
#
# http://bugs.python.org/issue21088
#
# the bug:
# when converting curses.window.addch to Argument Clinic
# the first two parameters were switched.
# if someday we can represent the signature of addch
# we will need to rewrite this test.
try:
signature = inspect.signature(stdscr.addch)
self.assertFalse(signature)
except ValueError:
# not generating a signature is fine.
pass
# So. No signature for addch.
# But Argument Clinic gave us a human-readable equivalent
# as the first line of the docstring. So we parse that,
# and ensure that the parameters appear in the correct order.
# Since this is parsing output from Argument Clinic, we can
# be reasonably certain the generated parsing code will be
# correct too.
human_readable_signature = stdscr.addch.__doc__.split("\n")[0]
offset = human_readable_signature.find("[y, x,]")
        assert offset >= 0, "addch docstring should document the '[y, x,]' group"
if __name__ == '__main__':
unittest.main()
|
JoseALermaIII/python-tutorials
|
refs/heads/master
|
pythontutorials/books/AutomateTheBoringStuff/Ch13/P5_readDocx.py
|
1
|
#! python3
"""Read docx
Accepts a filename of a .docx file and returns a single string value of its text.
Note:
* Example .docx files can be downloaded from http://nostarch.com/automatestuff/
"""
import docx
def getText(filename: str) -> str:
"""Get text
Gets text from a given .docx file.
Args:
filename: Path to .docx file to get text from.
Returns:
String with all document text.
"""
doc = docx.Document(filename)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
#fullText.append(' ' + para.text) # Alt: indent each paragraph
return "\n".join(fullText)
#return "\n\n".join(fullText) # Alt: double space between paragraphs
|
marctc/django-blog-zinnia
|
refs/heads/develop
|
zinnia/flags.py
|
9
|
"""Comment flags for Zinnia"""
from django.utils.lru_cache import lru_cache
from django.contrib.auth import get_user_model
from zinnia.settings import COMMENT_FLAG_USER_ID
PINGBACK = 'pingback'
TRACKBACK = 'trackback'
FLAGGER_USERNAME = 'Zinnia-Flagger'
@lru_cache(1)
def get_user_flagger():
"""
Return an User instance used by the system
when flagging a comment as trackback or pingback.
"""
User = get_user_model()
try:
user = User.objects.get(pk=COMMENT_FLAG_USER_ID)
except User.DoesNotExist:
try:
user = User.objects.get(**{User.USERNAME_FIELD: FLAGGER_USERNAME})
except User.DoesNotExist:
user = User.objects.create_user(FLAGGER_USERNAME)
return user
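# Caching sketch: because of @lru_cache(1), repeated calls return the same
# User instance without further queries:
#   assert get_user_flagger() is get_user_flagger()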
|
Pluto-tv/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/telemetry/internal/actions/play.py
|
32
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Telemetry page_action that performs the "play" action on media elements.
Media elements can be specified by a selector argument. If no selector is
defined then the action attempts to play the first video element or audio
element on the page. A selector can also be 'all' to play all media elements.
Other arguments to use are: playing_event_timeout_in_seconds and
ended_event_timeout_in_seconds, which forces the action to wait until
playing and ended events get fired respectively.
"""
from telemetry.core import exceptions
from telemetry.internal.actions import media_action
from telemetry.internal.actions import page_action
class PlayAction(media_action.MediaAction):
def __init__(self, selector=None,
playing_event_timeout_in_seconds=0,
ended_event_timeout_in_seconds=0):
super(PlayAction, self).__init__()
self._selector = selector if selector else ''
self._playing_event_timeout_in_seconds = playing_event_timeout_in_seconds
self._ended_event_timeout_in_seconds = ended_event_timeout_in_seconds
def WillRunAction(self, tab):
"""Load the media metrics JS code prior to running the action."""
super(PlayAction, self).WillRunAction(tab)
self.LoadJS(tab, 'play.js')
def RunAction(self, tab):
try:
tab.ExecuteJavaScript('window.__playMedia("%s");' % self._selector)
# Check if we need to wait for 'playing' event to fire.
if self._playing_event_timeout_in_seconds > 0:
self.WaitForEvent(tab, self._selector, 'playing',
self._playing_event_timeout_in_seconds)
# Check if we need to wait for 'ended' event to fire.
if self._ended_event_timeout_in_seconds > 0:
self.WaitForEvent(tab, self._selector, 'ended',
self._ended_event_timeout_in_seconds)
except exceptions.EvaluateException:
raise page_action.PageActionFailed('Cannot play media element(s) with '
'selector = %s.' % self._selector)
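# Usage sketch (assumption): play the first media element on the page and wait
# up to 10 seconds for its 'playing' event to fire:
#   action = PlayAction(playing_event_timeout_in_seconds=10)
#   action.WillRunAction(tab)
#   action.RunAction(tab)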
|
afronski/grammar-generator
|
refs/heads/master
|
grammar-generator/Elements/STG/Factories/StgSortingElementsFactory.py
|
1
|
from Elements.STG.Factories.Base.IStgClausesElementsFactory import IStgClausesElementsFactory
from Elements.STG.Sorting.SortingSpecifiedColumnElementForStg import SortingSpecifiedColumnElementForStg
from Elements.STG.Sorting.SortingElementWithColumnIdForStg import SortingElementWithColumnIdForStg
from Elements.STG.Sorting.SortingElementForStg import SortingElementForStg
class StgSortingElementsFactory(IStgClausesElementsFactory):
def getAttributeNameForSpecifiedElement(self):
return "SortingColumnName"
def createSpecifiedColumnElement(self, templates, settingsObject):
return SortingSpecifiedColumnElementForStg(templates, settingsObject)
def createElementWithColumnID(self, templates, settingsObject):
return SortingElementWithColumnIdForStg(templates, settingsObject)
def createElement(self, templates, settingsObject):
return SortingElementForStg(templates, settingsObject)
|
tailorian/Sick-Beard
|
refs/heads/ThePirateBay
|
sickbeard/notifiers/tweet.py
|
11
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from sickbeard import logger, common
from sickbeard.exceptions import ex
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl #@UnusedImport
except:
from cgi import parse_qsl #@Reimport
import lib.oauth2 as oauth
import lib.pythontwitter as twitter
class TwitterNotifier:
consumer_key = "vHHtcB6WzpWDG6KYlBMr8g"
consumer_secret = "zMqq5CB3f8cWKiRO2KzWPTlBanYmV0VYxSXZ0Pxds0E"
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def notify_snatch(self, ep_name):
if sickbeard.TWITTER_NOTIFY_ONSNATCH:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH]+': '+ep_name)
def notify_download(self, ep_name):
if sickbeard.TWITTER_NOTIFY_ONDOWNLOAD:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD]+': '+ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD]+' '+ep_name + ": " + lang)
def test_notify(self):
return self._notifyTwitter("This is a test notification from Sick Beard", force=True)
def _get_authorization(self):
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.log('Requesting temp token from Twitter')
resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
            logger.log('Invalid response from Twitter when requesting temp token: %s' % resp['status'])
else:
request_token = dict(parse_qsl(content))
sickbeard.TWITTER_USERNAME = request_token['oauth_token']
sickbeard.TWITTER_PASSWORD = request_token['oauth_token_secret']
return self.AUTHORIZATION_URL+"?oauth_token="+ request_token['oauth_token']
def _get_credentials(self, key):
request_token = {}
request_token['oauth_token'] = sickbeard.TWITTER_USERNAME
request_token['oauth_token_secret'] = sickbeard.TWITTER_PASSWORD
request_token['oauth_callback_confirmed'] = 'true'
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(key)
logger.log('Generating and signing request for an access token using key '+key)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
logger.log('oauth_consumer: '+str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.log('oauth_client: '+str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
logger.log('resp, content: '+str(resp)+','+str(content))
access_token = dict(parse_qsl(content))
logger.log('access_token: '+str(access_token))
logger.log('resp[status] = '+str(resp['status']))
if resp['status'] != '200':
            logger.log('The request for a token did not succeed: '+str(resp['status']), logger.ERROR)
return False
else:
logger.log('Your Twitter Access Token key: %s' % access_token['oauth_token'])
logger.log('Access Token secret: %s' % access_token['oauth_token_secret'])
sickbeard.TWITTER_USERNAME = access_token['oauth_token']
sickbeard.TWITTER_PASSWORD = access_token['oauth_token_secret']
return True
def _send_tweet(self, message=None):
username=self.consumer_key
password=self.consumer_secret
access_token_key=sickbeard.TWITTER_USERNAME
access_token_secret=sickbeard.TWITTER_PASSWORD
logger.log(u"Sending tweet: "+message)
api = twitter.Api(username, password, access_token_key, access_token_secret)
try:
api.PostUpdate(message.encode('utf8'))
except Exception, e:
logger.log(u"Error Sending Tweet: "+ex(e), logger.ERROR)
return False
return True
def _notifyTwitter(self, message='', force=False):
prefix = sickbeard.TWITTER_PREFIX
if not sickbeard.USE_TWITTER and not force:
return False
return self._send_tweet(prefix+": "+message)
notifier = TwitterNotifier
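# OAuth flow sketch (assumption; 'pin' is the verifier the user copies from
# Twitter after visiting the authorization URL):
#   n = TwitterNotifier()
#   url = n._get_authorization()  # step 1: temp token, user authorizes at url
#   n._get_credentials(pin)       # step 2: exchange the PIN for access tokens
#   n.test_notify()               # tweet with the stored tokens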
|
maestrano/odoo
|
refs/heads/master
|
addons/hw_escpos/controllers/main.py
|
125
|
# -*- coding: utf-8 -*-
import commands
import logging
import simplejson
import os
import os.path
import io
import base64
import openerp
import time
import random
import math
import md5
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import pickle
import re
import subprocess
import traceback
from threading import Thread, Lock
from Queue import Queue, Empty
try:
import usb.core
except ImportError:
usb = None
try:
from .. import escpos
from ..escpos import printer
from ..escpos import supported_devices
except ImportError:
escpos = printer = None
from PIL import Image
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class EscposDriver(Thread):
def __init__(self):
Thread.__init__(self)
self.queue = Queue()
self.lock = Lock()
self.status = {'status':'connecting', 'messages':[]}
def supported_devices(self):
if not os.path.isfile('escpos_devices.pickle'):
return supported_devices.device_list
else:
try:
                with open('escpos_devices.pickle','r') as f:
                    return pickle.load(f)
except Exception as e:
self.set_status('error',str(e))
return supported_devices.device_list
def add_supported_device(self,device_string):
r = re.compile('[0-9A-Fa-f]{4}:[0-9A-Fa-f]{4}');
match = r.search(device_string)
if match:
match = match.group().split(':')
vendor = int(match[0],16)
product = int(match[1],16)
name = device_string.split('ID')
if len(name) >= 2:
name = name[1]
else:
name = name[0]
_logger.info('ESC/POS: adding support for device: '+match[0]+':'+match[1]+' '+name)
device_list = supported_devices.device_list[:]
if os.path.isfile('escpos_devices.pickle'):
try:
f = open('escpos_devices.pickle','r')
device_list = pickle.load(f)
f.close()
except Exception as e:
self.set_status('error',str(e))
device_list.append({
'vendor': vendor,
'product': product,
'name': name,
})
try:
f = open('escpos_devices.pickle','w+')
f.seek(0)
pickle.dump(device_list,f)
f.close()
except Exception as e:
self.set_status('error',str(e))
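    # Parsing sketch (assumption): an lsusb-style line such as
    #   'Bus 002 Device 003: ID 04b8:0202 Seiko Epson Corp. Receipt Printer'
    # matches the vendor:product regex above, yielding vendor=0x04b8,
    # product=0x0202 and the text after 'ID' as the device name.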
def connected_usb_devices(self):
connected = []
for device in self.supported_devices():
if usb.core.find(idVendor=device['vendor'], idProduct=device['product']) != None:
connected.append(device)
return connected
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def get_escpos_printer(self):
try:
printers = self.connected_usb_devices()
if len(printers) > 0:
self.set_status('connected','Connected to '+printers[0]['name'])
return escpos.printer.Usb(printers[0]['vendor'], printers[0]['product'])
else:
self.set_status('disconnected','Printer Not Found')
return None
except Exception as e:
self.set_status('error',str(e))
return None
def get_status(self):
self.push_task('status')
return self.status
def open_cashbox(self,printer):
printer.cashdraw(2)
printer.cashdraw(5)
def set_status(self, status, message = None):
_logger.info(status+' : '+ (message or 'no message'))
if status == self.status['status']:
if message != None and (len(self.status['messages']) == 0 or message != self.status['messages'][-1]):
self.status['messages'].append(message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('ESC/POS Error: '+message)
elif status == 'disconnected' and message:
_logger.warning('ESC/POS Device Disconnected: '+message)
def run(self):
if not escpos:
_logger.error('ESC/POS cannot initialize, please verify system dependencies.')
return
while True:
try:
timestamp, task, data = self.queue.get(True)
printer = self.get_escpos_printer()
if printer == None:
if task != 'status':
self.queue.put((timestamp,task,data))
time.sleep(5)
continue
elif task == 'receipt':
if timestamp >= time.time() - 1 * 60 * 60:
self.print_receipt_body(printer,data)
printer.cut()
elif task == 'xml_receipt':
if timestamp >= time.time() - 1 * 60 * 60:
printer.receipt(data)
elif task == 'cashbox':
if timestamp >= time.time() - 12:
self.open_cashbox(printer)
elif task == 'printstatus':
self.print_status(printer)
elif task == 'status':
pass
except Exception as e:
self.set_status('error', str(e))
errmsg = str(e) + '\n' + '-'*60+'\n' + traceback.format_exc() + '-'*60 + '\n'
_logger.error(errmsg);
def push_task(self,task, data = None):
self.lockedstart()
self.queue.put((time.time(),task,data))
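    # Queue sketch: callers enqueue timestamped tasks and the daemon thread in
    # run() drains them, discarding stale work, e.g.
    #   driver.push_task('receipt', receipt_data)  # printed only if < 1 hour old
    #   driver.push_task('cashbox')                # honoured only if < 12 s old
    # ('receipt_data' is a placeholder for the rendered receipt payload)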
def print_status(self,eprint):
localips = ['0.0.0.0','127.0.0.1','127.0.1.1']
ips = [ c.split(':')[1].split(' ')[0] for c in commands.getoutput("/sbin/ifconfig").split('\n') if 'inet addr' in c ]
ips = [ ip for ip in ips if ip not in localips ]
eprint.text('\n\n')
eprint.set(align='center',type='b',height=2,width=2)
eprint.text('PosBox Status\n')
eprint.text('\n')
eprint.set(align='center')
if len(ips) == 0:
eprint.text('ERROR: Could not connect to LAN\n\nPlease check that the PosBox is correc-\ntly connected with a network cable,\n that the LAN is setup with DHCP, and\nthat network addresses are available')
elif len(ips) == 1:
eprint.text('IP Address:\n'+ips[0]+'\n')
else:
eprint.text('IP Addresses:\n')
for ip in ips:
eprint.text(ip+'\n')
if len(ips) >= 1:
eprint.text('\nHomepage:\nhttp://'+ips[0]+':8069\n')
eprint.text('\n\n')
eprint.cut()
def print_receipt_body(self,eprint,receipt):
def check(string):
return string != True and bool(string) and string.strip()
def price(amount):
return ("{0:."+str(receipt['precision']['price'])+"f}").format(amount)
def money(amount):
return ("{0:."+str(receipt['precision']['money'])+"f}").format(amount)
def quantity(amount):
if math.floor(amount) != amount:
return ("{0:."+str(receipt['precision']['quantity'])+"f}").format(amount)
else:
return str(amount)
def printline(left, right='', width=40, ratio=0.5, indent=0):
lwidth = int(width * ratio)
rwidth = width - lwidth
lwidth = lwidth - indent
left = left[:lwidth]
if len(left) != lwidth:
left = left + ' ' * (lwidth - len(left))
right = right[-rwidth:]
if len(right) != rwidth:
right = ' ' * (rwidth - len(right)) + right
return ' ' * indent + left + right + '\n'
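        # Layout sketch: with width=20 and ratio=0.6 the left field gets 12
        # columns and the right field 8, so printline('Coffee', '2.50',
        # width=20, ratio=0.6) returns 'Coffee' + 10 spaces + '2.50' + '\n'.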
def print_taxes():
taxes = receipt['tax_details']
for tax in taxes:
eprint.text(printline(tax['tax']['name'],price(tax['amount']), width=40,ratio=0.6))
# Receipt Header
if receipt['company']['logo']:
eprint.set(align='center')
eprint.print_base64_image(receipt['company']['logo'])
eprint.text('\n')
else:
eprint.set(align='center',type='b',height=2,width=2)
eprint.text(receipt['company']['name'] + '\n')
eprint.set(align='center',type='b')
if check(receipt['company']['contact_address']):
eprint.text(receipt['company']['contact_address'] + '\n')
if check(receipt['company']['phone']):
eprint.text('Tel:' + receipt['company']['phone'] + '\n')
if check(receipt['company']['vat']):
eprint.text('VAT:' + receipt['company']['vat'] + '\n')
if check(receipt['company']['email']):
eprint.text(receipt['company']['email'] + '\n')
if check(receipt['company']['website']):
eprint.text(receipt['company']['website'] + '\n')
if check(receipt['header']):
eprint.text(receipt['header']+'\n')
if check(receipt['cashier']):
eprint.text('-'*32+'\n')
eprint.text('Served by '+receipt['cashier']+'\n')
# Orderlines
eprint.text('\n\n')
eprint.set(align='center')
for line in receipt['orderlines']:
pricestr = price(line['price_display'])
if line['discount'] == 0 and line['unit_name'] == 'Unit(s)' and line['quantity'] == 1:
eprint.text(printline(line['product_name'],pricestr,ratio=0.6))
else:
eprint.text(printline(line['product_name'],ratio=0.6))
if line['discount'] != 0:
eprint.text(printline('Discount: '+str(line['discount'])+'%', ratio=0.6, indent=2))
if line['unit_name'] == 'Unit(s)':
eprint.text( printline( quantity(line['quantity']) + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
else:
eprint.text( printline( quantity(line['quantity']) + line['unit_name'] + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
# Subtotal if the taxes are not included
taxincluded = True
if money(receipt['subtotal']) != money(receipt['total_with_tax']):
eprint.text(printline('','-------'));
eprint.text(printline(_('Subtotal'),money(receipt['subtotal']),width=40, ratio=0.6))
print_taxes()
#eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
taxincluded = False
# Total
eprint.text(printline('','-------'));
eprint.set(align='center',height=2)
eprint.text(printline(_(' TOTAL'),money(receipt['total_with_tax']),width=40, ratio=0.6))
eprint.text('\n\n');
# Paymentlines
eprint.set(align='center')
for line in receipt['paymentlines']:
eprint.text(printline(line['journal'], money(line['amount']), ratio=0.6))
eprint.text('\n');
eprint.set(align='center',height=2)
eprint.text(printline(_(' CHANGE'),money(receipt['change']),width=40, ratio=0.6))
eprint.set(align='center')
eprint.text('\n');
# Extra Payment info
if receipt['total_discount'] != 0:
eprint.text(printline(_('Discounts'),money(receipt['total_discount']),width=40, ratio=0.6))
if taxincluded:
print_taxes()
#eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
# Footer
if check(receipt['footer']):
eprint.text('\n'+receipt['footer']+'\n\n')
eprint.text(receipt['name']+'\n')
eprint.text( str(receipt['date']['date']).zfill(2)
+'/'+ str(receipt['date']['month']+1).zfill(2)
+'/'+ str(receipt['date']['year']).zfill(4)
+' '+ str(receipt['date']['hour']).zfill(2)
+':'+ str(receipt['date']['minute']).zfill(2) )
driver = EscposDriver()
driver.push_task('printstatus')
hw_proxy.drivers['escpos'] = driver
class EscposProxy(hw_proxy.Proxy):
@http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
def open_cashbox(self):
_logger.info('ESC/POS: OPEN CASHBOX')
driver.push_task('cashbox')
@http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
def print_receipt(self, receipt):
_logger.info('ESC/POS: PRINT RECEIPT')
driver.push_task('receipt',receipt)
@http.route('/hw_proxy/print_xml_receipt', type='json', auth='none', cors='*')
def print_xml_receipt(self, receipt):
_logger.info('ESC/POS: PRINT XML RECEIPT')
driver.push_task('xml_receipt',receipt)
@http.route('/hw_proxy/escpos/add_supported_device', type='http', auth='none', cors='*')
def add_supported_device(self, device_string):
_logger.info('ESC/POS: ADDED NEW DEVICE:'+device_string)
driver.add_supported_device(device_string)
return "The device:\n"+device_string+"\n has been added to the list of supported devices.<br/><a href='/hw_proxy/status'>Ok</a>"
@http.route('/hw_proxy/escpos/reset_supported_devices', type='http', auth='none', cors='*')
def reset_supported_devices(self):
try:
os.remove('escpos_devices.pickle')
        except Exception:
            pass
return 'The list of supported devices has been reset to factory defaults.<br/><a href="/hw_proxy/status">Ok</a>'
|
Denisolt/Tensorflow_Chat_Bot
|
refs/heads/master
|
local/lib/python2.7/site-packages/scipy/constants/setup.py
|
159
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('constants', parent_package, top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
vlachoudis/sl4a
|
refs/heads/master
|
python-build/python-libs/gdata/tests/all_tests_coverage.py
|
87
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import coverage
import all_tests
import atom_tests.core_test
import atom.core
import atom.http_core
import atom.mock_http_core
import atom.auth
import atom.client
import atom.data
import gdata.gauth
import gdata.client
import gdata.data
import gdata.blogger.data
import gdata.blogger.client
from gdata.test_config import settings
# Ensure that coverage tests execute the live requests to the servers, but
# allow use of cached server responses to speed up repeated runs.
settings.RUN_LIVE_TESTS = True
settings.CLEAR_CACHE = False
def suite():
return unittest.TestSuite((atom_tests.core_test.suite(),))
if __name__ == '__main__':
coverage.erase()
coverage.start()
unittest.TextTestRunner().run(all_tests.suite())
coverage.stop()
coverage.report([atom.core, atom.http_core, atom.auth, atom.data,
atom.mock_http_core, atom.client, gdata.gauth, gdata.client,
gdata.data, gdata.blogger.data, gdata.blogger.client])
|
opencloudinfra/orchestrator
|
refs/heads/master
|
venv/Lib/site-packages/wheel/test/test_tagopt.py
|
326
|
"""
Tests for the bdist_wheel tag options (--python-tag, --universal, and
--plat-name)
"""
import sys
import shutil
import pytest
import py.path
import tempfile
import subprocess
SETUP_PY = """\
from setuptools import setup, Extension
setup(
name="Test",
version="1.0",
author_email="author@example.com",
py_modules=["test"],
{ext_modules}
)
"""
EXT_MODULES = "ext_modules=[Extension('_test', sources=['test.c'])],"
@pytest.fixture
def temp_pkg(request, ext=False):
tempdir = tempfile.mkdtemp()
def fin():
shutil.rmtree(tempdir)
request.addfinalizer(fin)
temppath = py.path.local(tempdir)
temppath.join('test.py').write('print("Hello, world")')
if ext:
temppath.join('test.c').write('#include <stdio.h>')
setup_py = SETUP_PY.format(ext_modules=EXT_MODULES)
else:
setup_py = SETUP_PY.format(ext_modules='')
temppath.join('setup.py').write(setup_py)
return temppath
@pytest.fixture
def temp_ext_pkg(request):
return temp_pkg(request, ext=True)
def test_default_tag(temp_pkg):
subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename == 'Test-1.0-py%s-none-any.whl' % (sys.version[0],)
assert wheels[0].ext == '.whl'
def test_explicit_tag(temp_pkg):
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel', '--python-tag=py32'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.startswith('Test-1.0-py32-')
assert wheels[0].ext == '.whl'
def test_universal_tag(temp_pkg):
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel', '--universal'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
assert wheels[0].ext == '.whl'
def test_universal_beats_explicit_tag(temp_pkg):
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel', '--universal', '--python-tag=py32'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
assert wheels[0].ext == '.whl'
def test_universal_in_setup_cfg(temp_pkg):
temp_pkg.join('setup.cfg').write('[bdist_wheel]\nuniversal=1')
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
assert wheels[0].ext == '.whl'
def test_pythontag_in_setup_cfg(temp_pkg):
temp_pkg.join('setup.cfg').write('[bdist_wheel]\npython_tag=py32')
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.startswith('Test-1.0-py32-')
assert wheels[0].ext == '.whl'
def test_legacy_wheel_section_in_setup_cfg(temp_pkg):
temp_pkg.join('setup.cfg').write('[wheel]\nuniversal=1')
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
assert wheels[0].ext == '.whl'
def test_plat_name_purepy(temp_pkg):
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.pure'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.endswith('-testplat_pure.whl')
assert wheels[0].ext == '.whl'
def test_plat_name_ext(temp_ext_pkg):
try:
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.arch'],
cwd=str(temp_ext_pkg))
except subprocess.CalledProcessError:
pytest.skip("Cannot compile C Extensions")
dist_dir = temp_ext_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.endswith('-testplat_arch.whl')
assert wheels[0].ext == '.whl'
def test_plat_name_purepy_in_setupcfg(temp_pkg):
temp_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.pure')
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel'],
cwd=str(temp_pkg))
dist_dir = temp_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.endswith('-testplat_pure.whl')
assert wheels[0].ext == '.whl'
def test_plat_name_ext_in_setupcfg(temp_ext_pkg):
temp_ext_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.arch')
try:
subprocess.check_call(
[sys.executable, 'setup.py', 'bdist_wheel'],
cwd=str(temp_ext_pkg))
except subprocess.CalledProcessError:
pytest.skip("Cannot compile C Extensions")
dist_dir = temp_ext_pkg.join('dist')
assert dist_dir.check(dir=1)
wheels = dist_dir.listdir()
assert len(wheels) == 1
assert wheels[0].basename.endswith('-testplat_arch.whl')
assert wheels[0].ext == '.whl'
|
unaizalakain/django
|
refs/heads/master
|
django/contrib/gis/utils/__init__.py
|
327
|
"""
This module contains useful utilities for GeoDjango.
"""
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.utils.wkt import precision_wkt # NOQA
if HAS_GDAL:
from django.contrib.gis.utils.ogrinfo import ogrinfo, sample # NOQA
from django.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA
from django.contrib.gis.utils.srs import add_postgis_srs, add_srs_entry # NOQA
from django.core.exceptions import ImproperlyConfigured
try:
# LayerMapping requires DJANGO_SETTINGS_MODULE to be set,
# so this needs to be in try/except.
from django.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA
except ImproperlyConfigured:
pass
|