repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
tanchao/chaos.trade | __init__.py | Python | bsd-2-clause | 1,215 | 0 | import sys
import os
cur_path = os.getcwd()
sys.path.append(cur_path)
start_env = '''
1. mysqld should be start by sys
2. ngnix should be start by sys
3. need manually start uwsgi: . uwsgi_chaos.sh
note socket 127.0.0.1:9527 is takend by it
'''
init_env = '''
1. install git:
sudo yum install git
2. setup git:
https://help.github.com/articles/set-up-git/
https://help.github.com/articles/generating-ssh-keys/
3. install Flask (py | thon and pip were installed by aws default):
sudo pup install Flask
4. create project:
| ERROR: type should be string, got " https://github.com/tanchao/chaos.trade\n 5. install mysql:\n sudo yum install mysql mysql-server mysql-libs mysql-devel\n sudo service mysqld start\n sudo chkconfig --level 35 mysqld on\n chkconfig --list | grep mysql\n http://jingyan.baidu.com/article/acf728fd10c3d6f8e510a3ef.html\n http://www.360doc.com/content/15/0516/11/14900341_470864335.shtml\n http://www.cnblogs.com/bjzhanghao/archive/2011/07/24/2115350.html\n 6. install web server\n yum install nginx\n sudo service nginx start\n sudo yum install gcc\n sudo CC=gcc pip install uwsgi\n'''\n" |
carmelom/sisview-bec-tn | sisview.py | Python | gpl-3.0 | 10,034 | 0.005382 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Carmelo Mordini
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os
from PyQt4 import QtGui, QtCore
import numpy as np
try:
import pyfftw.interfaces.scipy_fftpack as fftpack
except ImportError:
try:
import scipy.fftpack as fftpack
except ImportError:
fftpack = np.fft
fft2, fftshift = fftpack.fft2, fftpack.fftshift
import matplotlib
from matplotlib.pyplot import colormaps
matplotlib.use('Qt4Agg')
from libraries.libsis import RawSis
from libraries.mainwindow_ui import Ui_MainWindow
PROG_NAME = 'SISView'
PROG_COMMENT = 'A tool for a quick visualization of .sis files'
PROG_VERSION = '0.9.2'
DEFAULT_FOLDER = '/media/bec/Data/SIScam/SIScamProgram/Prog/img' #None
#from PyQt4.QtGui import (QApplication, QColumnView, QFileSystemModel,
# QSplitter, QTreeView)
#from PyQt4.QtCore import QDir, Qt
class Main(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self, ):
super(Main, self).__init__()
self.cwd = os.getcwd()
self.setupUi(self)
self.tableWidget.setupUi(self)
self.plotWidget0.setupUi(self)
self.plotWidget1.setupUi(self)
self.setWindowTitle(PROG_NAME+' '+PROG_VERSION)
self.setupToolbar()
self.rewriteTreeView()
# self.rewritePlotWidget()
self.connectActions()
self.openFolder(DEFAULT_FOLDER)
self.currentSis = None
self.fft_flag = None
self.dockAreasDict = {'Top': QtCore.Qt.TopDockWidgetArea, 'Right': QtCore.Qt.RightDockWidgetArea}
def setupToolbar(self,):
font = QtGui.QFont()
font.setPointSize(10)
self.toolBar.setFont(font)
self.toolBar.setStyleSheet('QToolBar{spacing:6px;}')
self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.actionBack = QtGui.QAction(self.toolBar)
self.actionBack.setIcon(QtGui.QIcon(":/icons/Back-icon.png"))
self.actionBack.setObjectName("actionBack")
self.actionBack.setIconText("Back")
self.toolBar.addAction(self.actionBack)
self.toolBar.addSeparator()
self.colormapLabel = QtGui.QLabel(self.toolBar)
self.colormapLabel.setText('Colormap')
self.colormapLabel.setFont(font)
self.toolBar.addWidget(self.colormapLabel)
self.colormapComboBox = QtGui.QComboBox(self.toolBar)
self.colormapComboBox.setFont(font)
self.colormapComboBox.addItems(colormaps())
self.colormapComboBox.setCurrentIndex(106)
self.toolBar.addWidget(self.colormapComboBox)
self.toolBar.addSeparator()
self.vminLabel = QtGui.QLabel(self.toolBar)
self.vminLabel.setText('Vmin')
self.vminLabel.setFont(font)
self.toolBar.addWidget(self.vminLabel)
self.vminDoubleSpinBox = QtGui.QDoubleSpinBox(self.toolBar)
self.toolBar.addWidget(self.vminDoubleSpinBox)
self.vmaxLabel = QtGui.QLabel(self.toolBar)
self.vmaxLabel.setText('Vmax')
self.vmaxLabel.setFont(font)
self.toolBar.addWidget(self.vmaxLabel)
self.vmaxDoubleSpinBox = QtGui.QDoubleSpinBox(self.toolBar)
self.vmaxDoubleSpinBox.setValue(2.0)
self.toolBar.addWidget(self.vmaxDoubleSpinBox)
self.toolBar.addSeparator()
self.fftLabel = QtGui.QLabel(self.toolBar)
self.fftLabel.setText('FFT plot')
self.fftLabel.setFont(font)
self.toolBar.addWidget(self.fftLabel)
self.fftComboBox = QtGui.QComboBox(self.toolBar)
self.fftComboBox.setFont(font)
self.fftComboBox.addItems(['None', 'Im0', 'Im1'])
self.fftComboBox.setCurrentIndex(0)
self.toolBar.addWidget(self.fftComboBox)
self.toolBar.addSeparator()
def rewriteTreeView(self,):
self.model = QtGui.QFileSystemModel()
self.model.setRootPath(QtCore.QDir.rootPath())
self.treeView.setModel(self.model)
self.header = self.treeView.header()
self.header.hideSection(1)
self.header.setResizeMode(0, QtGui.QHeaderView.ResizeToContents)
ROOT = sys.argv[1] if len(sys.argv) > 1 else os.getcwd() #QtCore.QDir.homePath()
self.currentFolder = ROOT
self.treeView.setRootIndex(self.model.index(ROOT))
pass
def connectActions(self):
self.actionOpen_Folder.triggered.connect(self.openFolder)
self.treeView.activated.connect(self.openSis)
self.treeView.activated.connect(self.openCs | v)
self.treeView.doubleClicked.connect(self.goToFolder)
self.colormapComboBox.currentIndexChanged.connect(lambda: self.replot(self.currentSis))
self.fftComboBox.currentIndexChanged.connect(self.set_fft_flag)
self.vminDoubleSpinBox.valueChanged.connect(lambda: self.replot(self.currentSis))
self.vmaxDoubleSpinBox.valueChanged.connect(lambda: self.replot(self.currentSis))
self.actionTop.triggered.connect(lambda: self.d | ock_to_area(self.actionTop.text()))
self.actionRight.triggered.connect(lambda: self.dock_to_area(self.actionRight.text()))
self.actionDetatch_All.triggered.connect(self.dock_detatch_all)
self.actionToggle0 = self.dockWidget0.toggleViewAction()
self.actionToggle1 = self.dockWidget1.toggleViewAction()
self.menuView.addAction(self.actionToggle0)
self.menuView.addAction(self.actionToggle1)
self.actionInfo.triggered.connect(self.infoBox)
self.actionQuit.triggered.connect(QtGui.qApp.quit)
self.actionBack.triggered.connect(self.goBackFolder)
pass
def dock_to_area(self, pos):
# positions seem to be: left = 1, right = 2, top = ?
for dock in [self.dockWidget0, self.dockWidget1]:
dock.setFloating(False)
self.addDockWidget(self.dockAreasDict[pos], dock)
self.showMaximized()
def dock_detatch_all(self):
for dock in [self.dockWidget0, self.dockWidget1]:
dock.setFloating(True)
self.showNormal()
def openFolder(self, folder=None):
if not folder or folder is None:
folder = QtGui.QFileDialog.getExistingDirectory(self, caption='Open folder',
# directory=QtCore.QDir.homePath(),
directory=self.currentFolder,
)
folder = folder if folder else self.currentFolder
print('OPEN:', folder)
self.currentFolder = folder
self.treeView.setRootIndex(self.model.index(folder))
def goToFolder(self, index):
path = self.model.filePath(index)
if os.path.isdir(path):
self.currentFolder = path
self.treeView.setRootIndex(self.model.index(path))
def goBackFolder(self):
parent = os.path.abspath(os.path.join(self.currentFolder, os.pardir))
print(parent)
self.currentFolder = parent
self.treeView.setRootIndex(self.model.index(parent))
pass
def openCsv(self, index):
path = self.model.filePath(index)
name = os.path.split(path)[1]
if os.path.isfile(path) and path.endswith('.csv'):
print(path)
self.csvLabel.setText('Display CSV: ' + name)
self.tableWidget.displayCSV(path)
def openSis(self, index):
path = self.model.filePath(index)
|
darrellsilver/norc | core/models/extras.py | Python | bsd-3-clause | 417 | 0.014388 |
from django.db.models import Model, CharField
class Revision(Model):
"""Represents a code revision | ."""
class Meta:
app_label = 'core'
db_table = 'norc_revision'
info = CharField(max_length=64, unique=True)
@staticmethod
def create(info):
return Revision.objects.create(in | fo=info)
def __str__(self):
return "[Revision %s]" % self.info
|
RayRuizhiLiao/ITK_4D | Modules/Filtering/ImageFeature/test/itkGradientVectorFlowImageFilterTest.py | Python | apache-2.0 | 2,887 | 0.000693 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file exc | ept in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Li | cense is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import itk
from sys import argv
# Input arguments
inputFilename = argv[1]
xOutputFilename = argv[2]
yOutputFilename = argv[3]
iterNum = int(argv[4])
noiseLevel = float(argv[5])
timeStep = float(argv[6])
itk.auto_progress(2)
# Define ITK types
DIM = 2
PixelType = itk.F
GradientPixelType = itk.CovariantVector[PixelType, DIM]
ImageType = itk.Image[PixelType, DIM]
GradientImageType = itk.Image[GradientPixelType, DIM]
WriterPixelType = itk.UC
WriteImageType = itk.Image[WriterPixelType, DIM]
GradientImageFilterType = itk.GradientImageFilter[
ImageType, PixelType, PixelType]
LaplacianImageFilterType = itk.LaplacianImageFilter[
ImageType, ImageType]
GradientVectorFlowImageFilterType = itk.GradientVectorFlowImageFilter[
GradientImageType, GradientImageType, PixelType]
VectorIndexSelectionCastImageFilter = itk.VectorIndexSelectionCastImageFilter[
GradientImageType, ImageType]
RescaleIntensityImageFilter = itk.RescaleIntensityImageFilter[
ImageType, WriteImageType]
# Read image file
reader = itk.ImageFileReader[ImageType].New(FileName=inputFilename)
# Compute gradient for image
gradient = GradientImageFilterType.New()
gradient.SetInput(reader.GetOutput())
laplacian = LaplacianImageFilterType.New()
# Compute GVF for image
gvf = GradientVectorFlowImageFilterType.New()
gvf.SetInput(gradient.GetOutput())
gvf.SetLaplacianFilter(laplacian)
gvf.SetIterationNum(iterNum)
gvf.SetNoiseLevel(noiseLevel)
gvf.SetTimeStep(timeStep)
gvf.Update()
# Write vector field components to image files
for i, fileName in enumerate((xOutputFilename, yOutputFilename)):
visc = VectorIndexSelectionCastImageFilter.New()
visc.SetInput(gvf.GetOutput())
visc.SetIndex(i)
visc.Update()
rescaler = RescaleIntensityImageFilter.New()
rescaler.SetOutputMinimum(0)
rescaler.SetOutputMaximum(255)
rescaler.SetInput(visc.GetOutput())
rescaler.Update()
writer = itk.ImageFileWriter[WriteImageType].New(
rescaler.GetOutput(), FileName=fileName)
writer.Update()
|
timedata-org/expressy | expressy/__init__.py | Python | mit | 99 | 0 | from . import express | ion, units
p | arse = expression.Maker()
parse_with_units = units.inject(parse)
|
Iconik/eve-suite | src/model/static/planet/schematic_type_map.py | Python | gpl-3.0 | 914 | 0.00547 | '''
Created on Mar 20, 2011
@author: frederikns
'''
from model.flyweight import Flyweight
from collections import namedtuple
from model.static.database import database
from model.dynamic.inventory.item import Item
class SchematicTypeMap(Flyweight):
def __init__(self,schematic_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.schematic_id = schematic_id
cursor = database.get_cursor(
"select * from planetSchematicTypeMap where schematicID={};".format(self.schematic_id))
types = list()
schematic_type = namedtuple("schematic_type", "item, is_input")
for row in cursor:
types.append(schematic_type(
item=Item(ro | w["typeID"], row["quantity"]),
is_input=(ro | w["isInput"])))
cursor.close()
|
dmonroy/python-etcd | src/etcd/client.py | Python | mit | 27,306 | 0.001099 | """
.. module:: python-etcd
:synopsis: A python etcd client.
.. moduleauthor:: Jose Plana <jplana@gmail.com>
"""
import logging
try:
# Python 3
from http.client import HTTPException
except ImportError:
# Python 2
from httplib import HTTPException
import socket
import urllib3
import urllib3.util
import json
import ssl
import etcd
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from .result import EtcdResult
_log = logging.getLogger(__name__)
class Client(object):
"""
Client for etcd, the distributed log service using raft.
"""
_MGET = 'GET'
_MPUT = 'PUT'
_MPOST = 'POST'
_MDELETE = 'DELETE'
_comparison_conditions = set(('prevValue', 'prevIndex', 'prevExist'))
_read_options = set(('recursive', 'wait', 'waitIndex', 'sorted', 'quorum'))
_del_conditions = set(('prevValue', 'prevIndex'))
def __init__(
self,
host='127.0.0.1',
port=4001,
version_prefix='/v2',
read_timeout=60,
allow_redirect=True,
protocol='http',
cert=None,
ca_cert=None,
allow_reconnect=False,
use_proxies=False,
expected_cluster_id=None,
per_host_pool_size=10,
result_class=EtcdResult
):
"""
Initialize the client.
Args:
host (mixed):
If a string, IP to connect to.
If a tuple ((host, port), (host, port), ...)
port (int): Port used to connect to etcd.
version_prefix (str): Url or version prefix in etcd url (default=/v2).
read_timeout (int): max seconds to wait for a read.
allow_redirect (bool): allow the client to connect to other nodes.
protocol (str): Protocol used to connect to etcd.
cert (mixed): If a string, the whole ssl client certificate;
if a tuple, the cert and key file names.
ca_cert (str): The ca certificate. If pressent it will enable
validation.
| allow_reconnect (bool): allow the client to reconnect to another
etcd server in the cluster in the case the
default one does not respond.
use_proxies (bool): we are using a list of proxies to which we connect,
and don't | want to connect to the original etcd cluster.
expected_cluster_id (str): If a string, recorded as the expected
UUID of the cluster (rather than
learning it from the first request),
reads will raise EtcdClusterIdChanged
if they receive a response with a
different cluster ID.
per_host_pool_size (int): specifies maximum number of connections to pool
by host. By default this will use up to 10
connections.
result_class (EtcdResult subclass): class used for resulting nodes,
default to EtcdResult
"""
_log.debug("New etcd client created for %s:%s%s",
host, port, version_prefix)
self._protocol = protocol
self._result_class = result_class
def uri(protocol, host, port):
return '%s://%s:%d' % (protocol, host, port)
if not isinstance(host, tuple):
self._machines_cache = []
self._base_uri = uri(self._protocol, host, port)
else:
if not allow_reconnect:
_log.error("List of hosts incompatible with allow_reconnect.")
raise etcd.EtcdException("A list of hosts to connect to was given, but reconnection not allowed?")
self._machines_cache = [uri(self._protocol, *conn) for conn in host]
self._base_uri = self._machines_cache.pop(0)
self.expected_cluster_id = expected_cluster_id
self.version_prefix = version_prefix
self._read_timeout = read_timeout
self._allow_redirect = allow_redirect
self._use_proxies = use_proxies
self._allow_reconnect = allow_reconnect
# SSL Client certificate support
kw = {
'maxsize': per_host_pool_size
}
if self._read_timeout > 0:
kw['timeout'] = self._read_timeout
if protocol == 'https':
# If we don't allow TLSv1, clients using older version of OpenSSL
# (<1.0) won't be able to connect.
_log.debug("HTTPS enabled.")
kw['ssl_version'] = ssl.PROTOCOL_TLSv1
if cert:
if isinstance(cert, tuple):
# Key and cert are separate
kw['cert_file'] = cert[0]
kw['key_file'] = cert[1]
else:
# combined certificate
kw['cert_file'] = cert
if ca_cert:
kw['ca_certs'] = ca_cert
kw['cert_reqs'] = ssl.CERT_REQUIRED
self.http = urllib3.PoolManager(num_pools=10, **kw)
if self._allow_reconnect:
# we need the set of servers in the cluster in order to try
# reconnecting upon error. The cluster members will be
# added to the hosts list you provided. If you are using
# proxies, set all
#
# Beware though: if you input '127.0.0.1' as your host and
# etcd advertises 'localhost', both will be in the
# resulting list.
# If we're connecting to the original cluster, we can
# extend the list given to the client with what we get
# from self.machines
if not self._use_proxies:
self._machines_cache = list(set(self._machines_cache) |
set(self.machines))
if self._base_uri in self._machines_cache:
self._machines_cache.remove(self._base_uri)
_log.debug("Machines cache initialised to %s",
self._machines_cache)
@property
def base_uri(self):
"""URI used by the client to connect to etcd."""
return self._base_uri
@property
def host(self):
"""Node to connect etcd."""
return urlparse(self._base_uri).netloc.split(':')[0]
@property
def port(self):
"""Port to connect etcd."""
return int(urlparse(self._base_uri).netloc.split(':')[1])
@property
def protocol(self):
"""Protocol used to connect etcd."""
return self._protocol
@property
def read_timeout(self):
"""Max seconds to wait for a read."""
return self._read_timeout
@property
def allow_redirect(self):
"""Allow the client to connect to other nodes."""
return self._allow_redirect
@property
def machines(self):
"""
Members of the cluster.
Returns:
list. str with all the nodes in the cluster.
>>> print client.machines
['http://127.0.0.1:4001', 'http://127.0.0.1:4002']
"""
# We can't use api_execute here, or it causes a logical loop
try:
uri = self._base_uri + self.version_prefix + '/machines'
response = self.http.request(
self._MGET,
uri,
timeout=self.read_timeout,
redirect=self.allow_redirect)
machines = [
node.strip() for node in
self._handle_server_response(response).data.decode('utf-8').split(',')
]
_log.debug("Retrieved list of machines: %s", machines)
return machines
except (urllib3.exceptions.HTTPError,
HTTPException,
socket.error) as e:
# We can't get the list of machines, if one server is |
WaterSheltieDragon/Wango-the-Robot | faceup.py | Python | gpl-3.0 | 368 | 0.038043 | import time
import maestro
# servo 0 is left/right
# servo 1 is up/down
try:
servo = maestro.Controller()
servo.setRange(1,4000,8000)
# about 5 clicks per full motion
# 1040 for left | /right + is left, - is rig | ht.
# 800 for up/down + is up, - is down.
x = servo.getPosition(1) + 800
servo.setAccel(1,6)
servo.setTarget(1,x)
finally:
servo.close
|
mengzhuo/my-leetcode-solution | valid-anagram.py | Python | mit | 180 | 0 | class Solution(object): |
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
return sorted(s) | == sorted(t)
|
sirinath/root | main/python/rootbrowse.py | Python | lgpl-2.1 | 711 | 0.002813 | #!/usr/bin/env python
# ROOT command line tools: rootbrowse
# Author: Julien Ripoche
# Mail: julien.ripoche@u-ps | ud.fr
# Date: 20/08/15
"""Command line to open a ROOT file on a TBrowser"""
import cmdLineUtils
import sys
# Help strings
COMMAND_HELP = "Open a ROOT file in a TBrowser"
EPILOG = """Examples:
- rootbrowse
Open a TBrowser
- rootbrowse file.root
Open the ROOT file 'file.root' in a TBrowser
"""
def execute():
# Collect arguments with the module argparse
parser = cmdLineUtils.getParserSingleFile(COMMAND_HELP, EPILOG)
# Put arguments in shape
args = cmdLineUtils. | getArgs(parser)
# Process rootBrowse
return cmdLineUtils.rootBrowse(args.FILE)
sys.exit(execute())
|
axiom-data-science/paegan | paegan/utils/asamath.py | Python | gpl-3.0 | 1,568 | 0.00574 | from math import sqrt, atan2, degrees, radians
class AsaMath(object):
@classmethod
def speed_direction_from_u_v(cls, **kwargs):
if "u" and "v" in kwargs:
speed = cls.__speed_from_u_v(kwargs.get('u'), kwargs.get('v'))
direction = cls.__direction_from_u_v(kwargs.get('u'), kwargs.get('v'), output=kwargs.get('output'))
return { 'speed':speed, 'direction':direction }
else:
raise TypeError( "must pass in 'u' and 'v' values ")
@classmethod
def __speed_from_u_v(cls, u, v):
return | sqrt((u*u) + (v*v))
@classmethod
def __direction_from_u_v(cls, u, v, **kwargs):
rads = atan2(v, u)
if 'output' in kwargs:
if kwargs.pop('output') == 'radians':
return rads
# if 'output' was not specified as 'radians', we return degrees
return cls.normalize_angle(angle=degrees(rads))
@classmethod
def azimuth_to_math_angle(cls, **kwargs):
return cls.normalize_angle(angle=90 - kwargs.get("azimuth"))
@ | classmethod
def math_angle_to_azimuth(cls, **kwargs):
return cls.normalize_angle(angle=(360 - kwargs.get("angle")) + 90)
@classmethod
def normalize_angle(cls, **kwargs):
return kwargs.get('angle') % 360
@classmethod
def is_number(cls, num):
try:
float(num) # for int, long and float
except TypeError:
try:
complex(num) # for complex
except TypeError:
return False
return True
|
ajackal/honey-hornet | honeyhornet/viewchecker.py | Python | gpl-3.0 | 5,097 | 0.002747 | import os
import argparse
from logger import HoneyHornetLogger
from threading import BoundedSemaphore
import threading
import logging
from datetime import date, datetime
from termcolor import colored
import http.client
import re
import time
class ViewChecker(HoneyHornetLogger):
def __init__(self, config=None):
HoneyHornetLogger.__init__(self)
self.config = config
self.verbose = False
self.banner = False
MAX_CONNECTIONS = 20 # max threads that can be created
self.CONNECTION_LOCK = BoundedSemaphore(value=MAX_CONNECTIONS)
self.TIMER_DELAY = 3 # timer delay used for Telnet testing
self.default_filepath = os.path.dirname(os.getcwd())
log_name = str(date.today()) + "_DEBUG.log"
log_name = os.path.join(self.default_filepath, "logs", log_name)
logging.basicConfig(filename=log_name, format='%(asctime)s %(levelname)s: %(mes | sage)s',
level=logging.DEBUG)
def determine_camera_model(self, vulnerable_host, https=False, retry=False):
""" simple banner grab with http.client """
ports = []
self.CONNECTION_LOCK.acquire()
service = "DETERMINE-CAMERA-MODEL"
if retry is False:
try:
host = vu | lnerable_host.ip
ports_to_check = set(vulnerable_host.ports)
except vulnerable_host.DoesNotExist:
host = str(vulnerable_host)
ports_to_check = set(ports.split(',').strip())
elif retry is True:
host = vulnerable_host
if self.verbose:
print("[*] Checking camera make & model of {0}".format(host))
logging.info('{0} set for {1} service'.format(host, service))
try:
for port in ports_to_check:
if https is True:
conn = http.client.HTTPSConnection(host, port)
else:
conn = http.client.HTTPConnection(host, port)
conn.request("GET", "/")
http_r1 = conn.getresponse()
camera_check = http_r1.read(1024)
headers = http_r1.getheaders()
if self.verbose:
print(http_r1.status, http_r1.reason)
print(http_r1.status, http_r1.reason)
results = re.findall(r"<title>(?P<camera_title>.*)</title>", str(camera_check))
if results:
print(results)
else:
print("No match for <Title> tag found.")
# puts banner into the class instance of the host
# vulnerable_host.put_banner(port, banner_txt, http_r1.status, http_r1.reason, headers)
# banner_grab_filename = str(date.today()) + "_banner_grabs.log"
# banner_grab_filename = os.path.join(self.default_filepath, "logs", banner_grab_filename)
# with open(banner_grab_filename, 'a') as banner_log:
# banner_to_log = "host={0}, http_port={1},\nheaders={2},\nbanner={3}\n".format(host, port,
# headers, banner_txt)
# banner_log.write(banner_to_log)
except http.client.HTTPException:
try:
self.determine_camera_model(host, https=True, retry=True)
except Exception as error:
logging.exception("{0}\t{1}\t{2}\t{3}".format(host, port, service, error))
except Exception as error:
if error[0] == 104:
self.determine_camera_model(host, https=True, retry=True)
logging.exception("{0}\t{1}\t{2}\t{3}".format(host, port, service, error))
except KeyboardInterrupt:
exit(0)
self.CONNECTION_LOCK.release()
def run_view_checker(self, hosts_to_check):
"""
Function tests hosts for default credentials on open 'admin' ports
Utilizes threading to greatly speed up the scanning
"""
service = "building_threads"
logging.info("Building threads.")
logging.info("Verbosity set to {0}".format(self.verbose))
threads = []
print("[*] Testing vulnerable host ip addresses...")
try:
for vulnerable_host in hosts_to_check:
if self.verbose:
print('[*] checking >> {0}'.format(vulnerable_host.ip))
if set(vulnerable_host.ports):
t0 = threading.Thread(target=self.determine_camera_model, args=(vulnerable_host, ))
threads.append(t0)
logging.info("Starting {0} threads.".format(len(threads)))
for thread in threads:
thread.start()
for thread in threads:
thread.join(120)
except KeyboardInterrupt:
exit(0)
except threading.ThreadError as error:
logging.exception("{0}\t{1}".format(service, error))
except Exception as e:
logging.exception(e)
|
brainysmurf/xattr3 | xattr/tool.py | Python | mit | 6,510 | 0.003533 | #!/usr/bin/env python3
##
# Copyright (c) 2007 Apple Inc.
#
# This is the MIT license. This software may also be distributed under the
# same terms as Python (the PSF license).
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
##
import sys
import os
import getopt
import xattr
import zlib
def usage(e=None):
if e:
print(e)
print("")
name = os.path.basename(sys.argv[0])
print("usage: %s [-lz] file [file ...]" % (name,))
print(" %s -p [-lz] attr_name file [file ...]" % (name,))
print(" %s -w [-z] attr_name attr_value file [file ...]" % (name,))
print(" %s -d attr_name file [file ...]" % (name,))
print("")
print("The first form lists the names of all xattrs on the given file(s).")
print("The second form (-p) prints the value of the xattr attr_name.")
print("The third form (-w) sets the value of the xattr attr_name to attr_value.")
print("The fourth form (-d) deletes the xattr attr_name.")
print("")
print("options:")
print(" -h: print this help")
print(" -l: print long format (attr_name: attr_value)")
print(" -z: compress or decompress (if compressed) attribute value in zip format")
if e:
sys.exit(64)
else:
sys.exit(0)
class NullsInString(Exception):
"""Nulls in string."""
_FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
def _dump(src, length=16):
result=[]
for i in range(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(_FILTER)
result.append("%04X %-*s %s\n" % (i, length*3, hexa, printable))
return ''.join(result)
def main():
try:
(optargs, args) = getopt.getopt(sys.argv[1:], "hlpwdz", ["help"])
except getopt.GetoptError as e:
usage(e)
attr_name = None
long_format = False
read | = Fa | lse
write = False
delete = False
compress = lambda x: x
decompress = compress
status = 0
for opt, arg in optargs:
if opt in ("-h", "--help"):
usage()
elif opt == "-l":
long_format = True
elif opt == "-p":
read = True
if write or delete:
usage("-p not allowed with -w or -d")
elif opt == "-w":
write = True
if read or delete:
usage("-w not allowed with -p or -d")
elif opt == "-d":
delete = True
if read or write:
usage("-d not allowed with -p or -w")
elif opt == "-z":
compress = zlib.compress
decompress = zlib.decompress
if write or delete:
if long_format:
usage("-l not allowed with -w or -p")
if read or write or delete:
if not args:
usage("No attr_name")
attr_name = args.pop(0)
if write:
if not args:
usage("No attr_value")
attr_value = args.pop(0)
if len(args) > 1:
multiple_files = True
else:
multiple_files = False
for filename in args:
def onError(e):
if not os.path.exists(filename):
sys.stderr.write("No such file: %s\n" % (filename,))
else:
sys.stderr.write(str(e) + "\n")
status = 1
try:
attrs = xattr.xattr(filename)
except (IOError, OSError) as e:
onError(e)
continue
if write:
try:
attrs[attr_name] = compress(attr_value)
except (IOError, OSError) as e:
onError(e)
continue
elif delete:
try:
del attrs[attr_name]
except (IOError, OSError) as e:
onError(e)
continue
except KeyError:
onError("No such xattr: %s" % (attr_name,))
continue
else:
try:
if read:
attr_names = (attr_name,)
else:
attr_names = list(attrs.keys())
except (IOError, OSError) as e:
onError(e)
continue
if multiple_files:
file_prefix = "%s: " % (filename,)
else:
file_prefix = ""
for attr_name in attr_names:
try:
try:
attr_value = decompress(attrs[attr_name])
except zlib.error:
attr_value = attrs[attr_name]
except KeyError:
onError("%sNo such xattr: %s" % (file_prefix, attr_name))
continue
if long_format:
try:
if attr_value.find('\0') >= 0:
raise NullsInString;
print("".join((file_prefix, "%s: " % (attr_name,), attr_value)))
except (UnicodeDecodeError, NullsInString):
print("".join((file_prefix, "%s:" % (attr_name,))))
print(_dump(attr_value))
else:
if read:
print("".join((file_prefix, attr_value)))
else:
print("".join((file_prefix, attr_name)))
sys.exit(status)
if __name__ == "__main__":
main()
|
openstack/swift | test/unit/common/middleware/crypto/test_encryption.py | Python | apache-2.0 | 31,916 | 0 | # Copyright (c) 2015-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import hmac
import json
import unittest
import uuid
from swift.common import storage_policy, constraints
from swift.common.middleware import copy
from swift.common.middleware import crypto
from swift.common.middleware.crypto import keymaster
from swift.common.middleware.crypto.crypto_utils import (
load_crypto_meta, Crypto)
from swift.common.ring import Ring
from swift.common.swob import Request, str_to_wsgi
from swift.obj import diskfile
from test.debug_logger import debug_logger
from test.unit import skip_if_no_xattrs
from test.unit.common.middleware.crypto.crypto_helpers import (
md5hex, encrypt, TEST_KEYMASTER_CONF)
from test.unit.helpers import setup_servers, teardown_servers
class TestCryptoPipelineChanges(unittest.TestCase):
# Tests the consequences of crypto middleware being in/out of the pipeline
# or having encryption disabled for PUT/GET requests on same object. Uses
# real backend servers so that the handling of headers and sysmeta is
# verified to diskfile and back.
_test_context | = None
@classmethod
def setUpClass(cls):
cls._test_context = setup_servers()
cls.proxy_app = cls._test_context["test_servers"][0]
@classmethod
def tearDownClass(cls):
if cls._test_context is not None:
teardown_servers(cls._test_context)
cls._test_context = None
def setUp(self):
skip_if_no_xattrs()
self.plaintext = b'unencrypted body content'
self.plaintext_eta | g = md5hex(self.plaintext)
self._setup_crypto_app()
def _setup_crypto_app(self, disable_encryption=False, root_secret_id=None):
# Set up a pipeline of crypto middleware ending in the proxy app so
# that tests can make requests to either the proxy server directly or
# via the crypto middleware. Make a fresh instance for each test to
# avoid any state coupling.
conf = {'disable_encryption': disable_encryption}
self.encryption = crypto.filter_factory(conf)(self.proxy_app)
self.encryption.logger = self.proxy_app.logger
km_conf = dict(TEST_KEYMASTER_CONF)
if root_secret_id is not None:
km_conf['active_root_secret_id'] = root_secret_id
self.km = keymaster.KeyMaster(self.encryption, km_conf)
self.crypto_app = self.km # for clarity
self.crypto_app.logger = self.encryption.logger
def _create_container(self, app, policy_name='one', container_path=None):
if not container_path:
# choose new container name so that the policy can be specified
self.container_name = uuid.uuid4().hex
self.container_path = 'http://foo:8080/v1/a/' + self.container_name
self.object_name = 'o'
self.object_path = self.container_path + '/' + self.object_name
container_path = self.container_path
req = Request.blank(
str_to_wsgi(container_path), method='PUT',
headers={'X-Storage-Policy': policy_name})
resp = req.get_response(app)
self.assertEqual('201 Created', resp.status)
# sanity check
req = Request.blank(
str_to_wsgi(container_path), method='HEAD',
headers={'X-Storage-Policy': policy_name})
resp = req.get_response(app)
self.assertEqual(policy_name, resp.headers['X-Storage-Policy'])
def _put_object(self, app, body):
req = Request.blank(
str_to_wsgi(self.object_path), method='PUT', body=body,
headers={'Content-Type': 'application/test'})
resp = req.get_response(app)
self.assertEqual('201 Created', resp.status)
self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
return resp
def _post_object(self, app):
req = Request.blank(str_to_wsgi(self.object_path), method='POST',
headers={'Content-Type': 'application/test',
'X-Object-Meta-Fruit': 'Kiwi'})
resp = req.get_response(app)
self.assertEqual('202 Accepted', resp.status)
return resp
def _copy_object(self, app, destination):
req = Request.blank(str_to_wsgi(self.object_path), method='COPY',
headers={'Destination': destination})
resp = req.get_response(app)
self.assertEqual('201 Created', resp.status)
self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
return resp
def _check_GET_and_HEAD(self, app, object_path=None):
object_path = str_to_wsgi(object_path or self.object_path)
req = Request.blank(object_path, method='GET')
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertEqual(self.plaintext, resp.body)
self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
req = Request.blank(object_path, method='HEAD')
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertEqual(b'', resp.body)
self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
def _check_match_requests(self, method, app, object_path=None):
object_path = str_to_wsgi(object_path or self.object_path)
# verify conditional match requests
expected_body = self.plaintext if method == 'GET' else b''
# If-Match matches
req = Request.blank(object_path, method=method,
headers={'If-Match': '"%s"' % self.plaintext_etag})
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertEqual(expected_body, resp.body)
self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
# If-Match wildcard
req = Request.blank(object_path, method=method,
headers={'If-Match': '*'})
resp = req.get_response(app)
self.assertEqual('200 OK', resp.status)
self.assertEqual(expected_body, resp.body)
self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
# If-Match does not match
req = Request.blank(object_path, method=method,
headers={'If-Match': '"not the etag"'})
resp = req.get_response(app)
self.assertEqual('412 Precondition Failed', resp.status)
self.assertEqual(b'', resp.body)
self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
# If-None-Match matches
req = Request.blank(
object_path, method=method,
headers={'If-None-Match': '"%s"' % self.plaintext_etag})
resp = req.get_response(app)
self.assertEqual('304 Not Modified', resp.status)
self.assertEqual(b'', resp.body)
self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
# If-None-Match wildcard
req = Request.blank(object_path, method=method,
headers={'If-None-Match': '*'})
resp = req.get_response(app)
self.assertEqual('304 Not Modified', resp.status)
self.assertEqual(b'', resp.body)
self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
# If-None-Match does not match
req = Request.blank(object_path, method=method,
headers={'If-None-Match': '"not the etag"'})
resp = req.get_response(app)
self.assertEqual('200 OK', |
Centre-Alt-Rendiment-Esportiu/att | src/python/test/test_classes/PingPongAppMock.py | Python | gpl-3.0 | 376 | 0.00266 | from test.classes.PingPongApp import PingPongApp
from test.test_classes.BallTrackerMock import BallTrackerMock
class PingPongAppMock(PingPongApp):
d | ef __init__(self, args):
super(PingPongAppMock, self).__init__(args)
self.ball_tracker = BallTrackerMock()
def run(self):
super(PingPongAppMock, self).run()
self.ball_tracker.end | _test() |
Matvey-Kuk/cspark-python | examples/1_room_mentions_counter.py | Python | mit | 1,702 | 0.00235 | ##########################################
# Check examples/0_simple_echo.py before #
##########################################
from cspark.Updater import Updater
from cspark.EventTypeRouter import EventTypeRouter
from cspark.UpdateHandler import UpdateHandler
from cspark.SQLiteContextEngine import SQLiteContextEngine
from cspark.MessageResponse import MessageResponse
updater = Updater(
access_token="",
)
class RoomMentionsCounterUpdateHandler(UpdateHandler, SQLiteContextEngine):
"""
Handler should process messages from user and response with answers.
This class inheri | ted from UpdateHandler and PeeweeContextStorage.
UpdateHandler gives you "self.send_response" to send answers.
PeeweeContextStorage gives you "self.context" which is a dictionary.
You can save your data there for future. It's stateful container,
which stores your data in Peewee ORM (SQLite by default).
"""
def handle_update(self):
if 'counter' not in self.co | ntext.room:
self.context.room['counter'] = 1
else:
self.context.room['counter'] += 1
self.send_response(
MessageResponse("Room counter: " + str(self.context.room['counter']))
)
class Router(EventTypeRouter):
"""
Router should decide which message should be processed by which handler.
This router is inherited from EventTypeRouter which divide updates by their type.
For example this router set RoomMentionsCounterUpdateHandler for updates which are messages.
"""
new_message_handler = RoomMentionsCounterUpdateHandler
# Now we need to register router
updater.add_router(Router)
# And start "event loop"
updater.idle()
|
scanlime/flipsyfat | flipsyfat/cores/sd_emulator/core.py | Python | mit | 4,333 | 0.001846 | from flipsyfat.cores.sd_emulator.linklayer import SDLinkLayer
from migen import *
from misoc.interconnect.csr import *
from misoc.interconnect.csr_eventmanager import *
from misoc.interconnect import wishbone
class SDEmulator(Module, AutoCSR):
"""Core for emulating SD card memory a block at a time,
with reads and writes backed by software.
"""
# Read and write buffers, each a single 512 byte block
mem_size = 1024
def _connect_event(self, ev, act, done):
# Event triggered on 'act' positive edge, pulses 'done' on clear
prev_act = Signal()
self.sync += prev_act.eq(act)
self.comb += ev.trigger.eq(act & ~prev_act)
self.comb += done.eq(ev.clear)
def __init__(self, platform, pads, **kwargs):
self.submodules.ll = ClockDomainsRenamer("local")(SDLinkLayer(platform, pads, **kwargs))
# Event interrupts and acknowledgment
self.submodules.ev = EventManager()
self.ev.read = EventSourcePulse()
self.ev.write = EventSourcePulse()
self.ev.finalize()
self._connect_event(self.ev.read, self.ll.block_read_act, self.ll.block_read_go)
self._connect_event(self.ev.write, self.ll.block_write_act, self.ll.block_write_done)
# Wishbone access to SRAM buffers
self.bus = wishbone.Interface()
self.submodules.wb_rd_buffer = wishbone.SRAM(self.ll.rd_buffer, read_only=False)
self.submodules.wb_wr_buffer = wishbone.SRAM(self.ll.wr_buffer, read_only=False)
wb_slaves = [
(lambda a: a[9] == 0, self.wb_rd_buffer.bus),
(lambda a: a[9] == 1, self.wb_wr_buffer.bus)
]
self.submodules.wb_decoder = wishbone.Decoder(self.bus, wb_slaves, register=True)
# Local reset domain
self._reset = CSRStorage()
self.clock_domains | .cd_local = ClockDomain()
self.comb += self.cd_local.clk.eq(ClockSignal())
self.comb += self.cd_local.rst.eq(ResetSignal() | self._reset.storage)
# Current data operation
self._read_act = CSRStatus()
s | elf._read_addr = CSRStatus(32)
self._read_byteaddr = CSRStatus(32)
self._read_num = CSRStatus(32)
self._read_stop = CSRStatus()
self._write_act = CSRStatus()
self._write_addr = CSRStatus(32)
self._write_byteaddr = CSRStatus(32)
self._write_num = CSRStatus(32)
self._preerase_num = CSRStatus(23)
self._erase_start = CSRStatus(32)
self._erase_end = CSRStatus(32)
self.comb += [
self._read_act.status.eq(self.ll.block_read_act),
self._read_addr.status.eq(self.ll.block_read_addr),
self._read_byteaddr.status.eq(self.ll.block_read_byteaddr),
self._read_num.status.eq(self.ll.block_read_num),
self._read_stop.status.eq(self.ll.block_read_stop),
self._write_act.status.eq(self.ll.block_write_act),
self._write_addr.status.eq(self.ll.block_write_addr),
self._write_byteaddr.status.eq(self.ll.block_write_byteaddr),
self._write_num.status.eq(self.ll.block_write_num),
self._preerase_num.status.eq(self.ll.block_preerase_num),
self._erase_start.status.eq(self.ll.block_erase_start),
self._erase_end.status.eq(self.ll.block_erase_end),
]
# Informational registers, not needed for data transfer
self._info_bits = CSRStatus(16)
self.comb += self._info_bits.status.eq(Cat(
self.ll.mode_4bit,
self.ll.mode_spi,
self.ll.host_hc_support,
Constant(False), # Reserved bit 3
Constant(False), # Reserved bit 4
Constant(False), # Reserved bit 5
Constant(False), # Reserved bit 6
Constant(False), # Reserved bit 7
self.ll.info_card_desel,
self.ll.err_op_out_range,
self.ll.err_unhandled_cmd,
self.ll.err_cmd_crc,
))
self._most_recent_cmd = CSRStatus(len(self.ll.cmd_in_cmd))
self.comb += self._most_recent_cmd.status.eq(self.ll.cmd_in_cmd)
self._card_status = CSRStatus(len(self.ll.card_status))
self.comb += self._card_status.status.eq(self.ll.card_status)
|
PastebinArchiveReader/PAR | PAR.py | Python | gpl-2.0 | 1,869 | 0.00428 | """
Python Archive Reader 1.0
https://github.com/PastebinArchiveReader/PAR
"""
import requests
from bs4 import BeautifulSoup
import urllib
import argparse
import time
# add parsing functionality to provide files
parser = argparse.ArgumentParser(description="Script to download pastebin.com archives",
epilog='''You can download different archives from pastebin.com with this script.
Simply specify a language, extension and path.''')
parser.add_argument("-l", "--language", dest="language", help="specify the programming language",
metavar="python, csharp, cpp, etc.")
parser.add_argument("-e", "--extension", dest="extension", help="file extension of the language",
metavar="extension")
parser.add_argument("-p", "--path", dest="path", help="where to save the download | ed files",
metavar="/home/anon/scripts/")
args = parser.parse_args()
url = "http://pastebin.com/archive/" + args.language
while 1:
source = requests.get(url)
soup = BeautifulSoup(source.text)
for link in soup.find_all('a'):
if len(link.get('href')) == 9:
if link.get('href') != "/settings": # "/settings" is just a 9-characters configuration file fr | om Pastebin.com. Pointless.
ID = link.get('href')
paste = link.get('href').replace('/', '')
paste = "http://www.pastebin.com/raw.php?i=" + paste
print("[?] {}".format(paste))
downloaded_file = args.path + "/" + ID + args.extension
urllib.urlretrieve(paste, downloaded_file)
print("[!] Downloaded !\n")
time.sleep(3.5) # If the delay is smaller, Pastebin.com will block your IP
print("Finished !")
time.sleep(1)
print("Restarting...")
|
psederberg/pynamite | docs/examples/wish.py | Python | gpl-3.0 | 1,653 | 0.009679 | #
# My wish for Pynamite
#
from __future__ import with_statement
from pynamite import *
from pynamite.actor import TextBox
def scene1():
| # define some actors
x = TextBox("Pynamite")
y = TextBox("Rocks!!!")
# tell the first actor to enter
enter(x)
# wait for a keypress to continue
pause()
# fade out one actor while other comes in
# # You can use with blocks
| # with parallel():
# fadeout(1.0,x)
# fadein(1.0,y)
# Or the functional notation
set_var(y, "opacity", 0.0)
enter(y)
def together():
fadeout(4.0,x)
with serial():
linear(y, "opacity", end_val=.5, duration=1.0)
linear(y, "opacity", end_val=.0, duration=1.0)
linear(y, "opacity", end_val=1.0, duration=2.0)
#fadeout(.5,y)
#fadein(.5,y)
in_parallel(together)
# wait for intput
pause()
# last actor leaves
fadeout(1.0,y)
pause()
# add that scene to the play
add_scene(scene1)
def scene2():
# define the actor
x = TextBox("Yes, it Rocks!!!")
# set its opacity to 0.0
set_var(x, "opacity", 0.0)
# have it enter (but remember it's still not visible)
enter(x)
# have it become visible, but in a fancy way
smooth(x, "opacity", end_val=.5,duration=.5)
smooth(x, "opacity", end_val=.25,duration=.25)
smooth(x, "opacity", end_val=.75,duration=.5)
smooth(x, "opacity", end_val=.5,duration=.25)
smooth(x, "opacity", end_val=1.0,duration=.5)
# wait for input
pause()
# have the actor leave
leave()
# add this scene
add_scene(scene2)
# run it
run()
|
TamiaLab/carnetdumaker | apps/snippets/apps.py | Python | agpl-3.0 | 323 | 0 | """
Application file for the code snippets app.
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SnippetsConf | ig(AppConfig):
"""
Application configuration class for the code snippets app.
"""
name = 'apps.snippets'
verbose_name = _('Code snippets') | |
Extremus-io/djwebsockets | djwebsockets/mixins/__init__.py | Python | mit | 358 | 0 | class BaseWSMixin(object):
@classmethod
def on_connect(cls | , socket, path):
pass
@classmethod
def on_message(cls, socket, message):
pass
@classmethod
def on_close(cls, socket):
pass
class MixinFa | il(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
|
googleapis/python-analytics-data | google/analytics/data_v1alpha/services/alpha_analytics_data/transports/grpc_asyncio.py | Python | apache-2.0 | 19,474 | 0.002003 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.analytics.data_v1alpha.types import analytics_data_api
from .base import AlphaAnalyticsDataTransport, DEFAULT_CLIENT_INFO
from .grpc import AlphaAnalyticsDataGrpcTransport
class AlphaAnalyticsDataGrpcAsyncIOTransport(AlphaAnalyticsDataTransport):
"""gRPC AsyncIO backend transport for AlphaAnalyticsData.
Google Analytics reporting data service.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "analyticsdata.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "analyticsdata.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
g | oogle.api_core.exceptions.DuplicateCredentialA | rgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
|
quizlet/grpc | src/python/grpcio/grpc/_auth.py | Python | apache-2.0 | 2,543 | 0 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GRPCAuthMetadataPlugins for standard authentication."""
import inspect
from concurrent import futures
import grpc
def _sign_request(callback, token, error):
metadata = (('authorization', 'Bearer {}'.format(token)),)
callback(metadata, error)
def _create_get_token_callback(callback):
def get_token_callback(future):
try:
access_token = future.result().access_token
except Exception as exception: # pylint: disable=broad-except
_sign_request(callback, None, exception)
else:
_sign_request(callback, access_token, None)
return get_token_callback
class GoogleCallCredentials(grpc.AuthMetadataPlugin):
"""Metadata wrapper for GoogleCredentials from the oauth2client library."""
def __init__(self, credentials):
self._credentials = credentials
self._pool = futures.ThreadPoolExecutor(max_workers | =1)
# Hack to determine if these are JWT creds and we need to pass
# additional_claims when getting a token
self._is_jwt = 'additional_claims' in inspect.getargspec(
credentials.get_access_token).args
def __call__(self, context, callback):
# MetadataPlugins cannot block (see grpc.beta.interfaces.py)
if self._is_jwt:
future = self._pool.submit(
self._credentials.get_access_token,
| additional_claims={'aud': context.service_url})
else:
future = self._pool.submit(self._credentials.get_access_token)
future.add_done_callback(_create_get_token_callback(callback))
def __del__(self):
self._pool.shutdown(wait=False)
class AccessTokenCallCredentials(grpc.AuthMetadataPlugin):
"""Metadata wrapper for raw access token credentials."""
def __init__(self, access_token):
self._access_token = access_token
def __call__(self, context, callback):
_sign_request(callback, self._access_token, None)
|
code4bones/Triton | examples/crackme_hash_collision.py | Python | lgpl-3.0 | 5,246 | 0.006672 |
from triton import *
import smt2lib
#
# This example breaks a simple hash routine.
#
# Check the ./samples/crackmes/crackme_hash.c file. This file builds
# a 'hash' and checks the checksum 0xad6d.
#
# The needed password is 'elite'. Example:
# $ ./samples/crackmes/crackme_hash elite
# Win
#
# This Triton code will try to break and find a hash collision.
#
# $ ./triton ./examples/crackme_hash_collision.py ./samples/crackmes/crackme_hash aaaaa
# [+] Please wait, computing in progress...
# {'SymVar_1': "0x72, 'r'", 'SymVar_0': "0x6c, 'l'", 'SymVar_3': "0x78, 'x'", 'SymVar_2': "0x64, 'd'", 'SymVar_4': "0x71, 'q'"}
# {'SymVar_1': "0x78, 'x'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x62, 'b'", 'SymVar_2': "0x61, 'a'", 'SymVar_4': "0x6a, 'j'"}
# {'SymVar_1': "0x68, 'h'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x62, 'b'", 'SymVar_2': "0x61, 'a'", 'SymVar_4': "0x7a, 'z'"}
# {'SymVar_1': "0x70, 'p'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x62, 'b'", 'SymVar_2': "0x61, 'a'", 'SymVar_4': "0x62, 'b'"}
# {'SymVar_1': "0x72, 'r'", 'SymVar_0': "0x6f, 'o'", 'SymVar_3': "0x62, 'b'", 'SymVar_2': "0x62, 'b'", 'SymVa | r_4': "0x62, 'b'"}
# {'SymVar_1': "0x7a, 'z'", 'SymVar_0': "0x6f, 'o'", 'SymVar_3': "0x62, 'b'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x6a, 'j'"}
| # {'SymVar_1': "0x7a, 'z'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x62, 'b'", 'SymVar_2': "0x63, 'c'", 'SymVar_4': "0x6a, 'j'"}
# {'SymVar_1': "0x78, 'x'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x62, 'b'", 'SymVar_2': "0x63, 'c'", 'SymVar_4': "0x68, 'h'"}
# {'SymVar_1': "0x78, 'x'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x72, 'r'", 'SymVar_2': "0x63, 'c'", 'SymVar_4': "0x78, 'x'"}
# {'SymVar_1': "0x7a, 'z'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x70, 'p'", 'SymVar_2': "0x63, 'c'", 'SymVar_4': "0x78, 'x'"}
# {'SymVar_1': "0x7a, 'z'", 'SymVar_0': "0x6f, 'o'", 'SymVar_3': "0x70, 'p'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x78, 'x'"}
# {'SymVar_1': "0x70, 'p'", 'SymVar_0': "0x6b, 'k'", 'SymVar_3': "0x72, 'r'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x74, 't'"}
# {'SymVar_1': "0x72, 'r'", 'SymVar_0': "0x6f, 'o'", 'SymVar_3': "0x70, 'p'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x70, 'p'"}
# {'SymVar_1': "0x72, 'r'", 'SymVar_0': "0x6f, 'o'", 'SymVar_3': "0x72, 'r'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x72, 'r'"}
# {'SymVar_1': "0x72, 'r'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x73, 's'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x70, 'p'"}
# {'SymVar_1': "0x62, 'b'", 'SymVar_0': "0x6e, 'n'", 'SymVar_3': "0x63, 'c'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x70, 'p'"}
# {'SymVar_1': "0x6a, 'j'", 'SymVar_0': "0x66, 'f'", 'SymVar_3': "0x73, 's'", 'SymVar_2': "0x62, 'b'", 'SymVar_4': "0x70, 'p'"}
# {'SymVar_1': "0x62, 'b'", 'SymVar_0': "0x62, 'b'", 'SymVar_3': "0x73, 's'", 'SymVar_2': "0x66, 'f'", 'SymVar_4': "0x70, 'p'"}
# {'SymVar_1': "0x62, 'b'", 'SymVar_0': "0x62, 'b'", 'SymVar_3': "0x70, 'p'", 'SymVar_2': "0x67, 'g'", 'SymVar_4': "0x70, 'p'"}
# {'SymVar_1': "0x62, 'b'", 'SymVar_0': "0x62, 'b'", 'SymVar_3': "0x72, 'r'", 'SymVar_2': "0x67, 'g'", 'SymVar_4': "0x72, 'r'"}
# loose
#
# Triton found several collisions. Example with the first collision:
# $ ./samples/crackmes/crackme_hash lrdxq
# Win
# $
#
def cafter(instruction):
# movzx esi,BYTE PTR [rax]
# RAX points on the user password
if instruction.getAddress() == 0x400572:
rsiId = getRegSymbolicID(IDREF.REG.RSI)
convertExprToSymVar(rsiId, 64)
# mov eax,DWORD PTR [rbp-0x4]
# RAX must be equal to 0xad6d to win
if instruction.getAddress() == 0x4005c5:
print '[+] Please wait, computing in progress...'
raxId = getRegSymbolicID(IDREF.REG.RAX)
raxExpr = getFullExpression(getSymExpr(raxId).getAst())
# We want printable characters
expr = smt2lib.compound([
smt2lib.smtAssert(smt2lib.bvugt(smt2lib.variable('SymVar_0'), smt2lib.bv(96, 64))),
smt2lib.smtAssert(smt2lib.bvult(smt2lib.variable('SymVar_0'), smt2lib.bv(123, 64))),
smt2lib.smtAssert(smt2lib.bvugt(smt2lib.variable('SymVar_1'), smt2lib.bv(96, 64))),
smt2lib.smtAssert(smt2lib.bvult(smt2lib.variable('SymVar_1'), smt2lib.bv(123, 64))),
smt2lib.smtAssert(smt2lib.bvugt(smt2lib.variable('SymVar_2'), smt2lib.bv(96, 64))),
smt2lib.smtAssert(smt2lib.bvult(smt2lib.variable('SymVar_2'), smt2lib.bv(123, 64))),
smt2lib.smtAssert(smt2lib.bvugt(smt2lib.variable('SymVar_3'), smt2lib.bv(96, 64))),
smt2lib.smtAssert(smt2lib.bvult(smt2lib.variable('SymVar_3'), smt2lib.bv(123, 64))),
smt2lib.smtAssert(smt2lib.bvugt(smt2lib.variable('SymVar_4'), smt2lib.bv(96, 64))),
smt2lib.smtAssert(smt2lib.bvult(smt2lib.variable('SymVar_4'), smt2lib.bv(123, 64))),
smt2lib.smtAssert(smt2lib.equal(raxExpr, smt2lib.bv(0xad6d, 64))) # collision: (assert (= rax 0xad6d)
])
# Get max 20 different models
models = getModels(expr, 20)
for model in models:
print {k: "0x%x, '%c'" % (v, v) for k, v in model.items()}
if __name__ == '__main__':
startAnalysisFromSymbol('check')
addCallback(cafter, IDREF.CALLBACK.AFTER)
runProgram()
|
nop33/indico-plugins | piwik/indico_piwik/queries/utils.py | Python | gpl-3.0 | 1,914 | 0.001045 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import json
from flask_pluginengine import current_plugin
def get_json_from_remote_server(func, default={}, **kwargs):
"""
Safely manage calls to the remote server by encapsulating JSON creation
from Piwik data.
"""
rawjson = func(**kwargs)
try:
data = json.loads(rawjson)
if isinstance(data, dict) and data.get('result') == 'error':
current_plugin.logger.error('The Piwik se | rver responded with an error: %s', data['message'])
return {}
return data
except Exception:
current_plugin.logger.exception('Unable to load JSON from sourc | e %s', rawjson)
return default
def reduce_json(data):
"""Reduce a JSON object"""
return reduce(lambda x, y: int(x) + int(y), data.values())
def stringify_seconds(seconds=0):
"""
Takes time as a value of seconds and deduces the delta in human-readable
HHh MMm SSs format.
"""
seconds = int(seconds)
minutes = seconds / 60
ti = {'h': 0, 'm': 0, 's': 0}
if seconds > 0:
ti['s'] = seconds % 60
ti['m'] = minutes % 60
ti['h'] = minutes / 60
return "%dh %dm %ds" % (ti['h'], ti['m'], ti['s'])
|
Janzert/halite_ranking | rating_stats.py | Python | mit | 8,861 | 0.003611 | #!/usr/bin/env python3
import argparse
import json
import math
import sys
from collections import defaultdict
import trueskill
import matplotlib.pyplot as plot
import utility
def phi(x):
"""Cumulative distribution function for the standard normal distribution
Taken from python math module documentation"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
def ts_winp(a, b, env=None):
"""Win probability of player a over b given their t | rueskill ratings.
Formula found at https://github.com/sublee/trueskill/issues/1#issuecomment-244699989"""
if not env:
env = trueskill.global_env()
epsilon = trueskill.calc_draw_margin(env.draw_probability, 2)
denom = math.sqrt(a.sigma**2 + b. | sigma**2 + (2 * env.beta**2))
return phi((a.mu - b.mu - epsilon) / denom)
def wl_winp(a, b):
ciq = math.sqrt(a.sigma**2 + b.sigma**2 + (2 * (25/6)**2))
return 1 / (1 + math.exp((b.mu - a.mu) / ciq))
def pl_winp(a, b):
    """Win probability of player a over b given their PL ratings."""
    total = a + b
    return a / total
def ratings_rmse(game_results, ratings, winp_func, subjects=None):
    """Root-mean-square error of pairwise win predictions.

    For every pair of players in every game, compare the probability from
    *winp_func* against the actual outcome (1 for a win, 0 for a loss).
    Pairs with an unrated player are skipped and reported.  When *subjects*
    is given, only pairs containing at least one subject are scored.
    """
    squared_error = 0
    skipped = 0
    predictions = 0
    for game in game_results:
        ordered = sorted(game.items(), key=lambda item: item[1])
        for idx, (player, prank) in enumerate(ordered):
            for opp, orank in ordered[idx + 1:]:
                if subjects and player not in subjects and opp not in subjects:
                    continue
                if player not in ratings or opp not in ratings:
                    skipped += 1
                    continue
                predicted = winp_func(ratings[player], ratings[opp])
                actual = 1 if prank < orank else 0
                squared_error += (predicted - actual)**2
                predictions += 1
    if skipped:
        print("Could not make a prediction for %d pairs." % (
            skipped,))
    print("With %d predictions made." % (predictions,))
    return math.sqrt(squared_error / predictions)
def ts_order(a, b):
    """True when a's conservative estimate (mu - 3*sigma) beats b's."""
    conservative_a = a.mu - 3 * a.sigma
    conservative_b = b.mu - 3 * b.sigma
    return conservative_a > conservative_b
def pl_order(a, b):
    """True when PL rating a ranks strictly above b."""
    return b < a
def ratings_order_error(game_results, ratings, rank_order, subjects=None):
    """Fraction of pairwise game results the ratings order incorrectly.

    For every pair of players in every game, *rank_order* is asked which
    rating ranks higher; a prediction counts as wrong when it disagrees
    with the actual finishing order, or when the ordering is indecisive.
    Pairs with an unrated player are skipped (and reported).  When
    *subjects* is given, only pairs containing at least one subject count.
    Returns wrong predictions divided by total predictions.
    """
    num_wrong = 0
    num_missed = 0
    num_predictions = 0
    for game in game_results:
        # Players sorted by their finishing rank within this game.
        gameranks = sorted(game.items(), key=lambda x: x[1])
        for pix, (player, prank) in enumerate(gameranks[:-1]):
            for opp, orank in gameranks[pix+1:]:
                if subjects and player not in subjects and opp not in subjects:
                    continue
                if player not in ratings or opp not in ratings:
                    num_missed += 1
                    continue
                # Query the ordering both ways to detect indecisive ratings.
                better = rank_order(ratings[player], ratings[opp])
                worse = rank_order(ratings[opp], ratings[player])
                # if player rating is indecisive, count as wrong prediction
                # see Weng and Lin 2011 Section 6
                if (better == worse) or (better != (prank < orank)):
                    num_wrong += 1
                num_predictions += 1
    if num_missed:
        print("Could not make a prediction for %d pairs." % (
            num_missed,))
    print("With %d predictions made." % (num_predictions,))
    return num_wrong / num_predictions
def best_scores(game_results):
    """Print the best achievable prediction scores for *game_results*.

    Builds the empirical head-to-head win counts and uses the true win
    percentages as a "perfect" rating, reporting the RMSE and ordering
    error such an oracle would obtain.
    """
    head_to_head = defaultdict(lambda: defaultdict(int))
    for game in game_results:
        for player, prank in game.items():
            for opp, orank in game.items():
                if player == opp:
                    continue
                if prank < orank:
                    head_to_head[player][opp] += 1

    # Each player "rates" as themselves; the closures below look the
    # head-to-head record up directly.
    ratings = {player: player for game in game_results for player in game}

    def pwin(a, b):
        if head_to_head[a][b] == 0:
            return 0
        if head_to_head[b][a] == 0:
            return 1
        wins = head_to_head[a][b]
        losses = head_to_head[b][a]
        return wins / (wins + losses)

    def rank_order(a, b):
        return head_to_head[a][b] > head_to_head[b][a]

    rmse = ratings_rmse(game_results, ratings, pwin)
    print("True probability RMSE %f" % (rmse,))
    order_ratio = ratings_order_error(game_results, ratings, rank_order)
    print("True probability incorrectly ordered %f%% results" % (order_ratio * 100,))
def load_ts_ratings(filename):
    """Load trueskill ratings from a CSV of rank,player,score,mu,sigma."""
    ratings = {}
    with open(filename) as rating_file:
        for line in rating_file:
            rank, player, score, mu, sigma = line.split(",")
            ratings[player.strip()] = trueskill.Rating(mu=float(mu),
                                                       sigma=float(sigma))
    return ratings
def load_pl_ratings(filename):
    """Load Plackett-Luce ratings from a CSV of rank,player,rating."""
    ratings = {}
    with open(filename) as rating_file:
        for line in rating_file:
            rank, player, rating = line.split(",")
            ratings[player.strip()] = float(rating)
    return ratings
def main(args=sys.argv[1:]):
parser = argparse.ArgumentParser("Gather various performance statistics from ratings.")
parser.add_argument("game_files", nargs="+",
help="Json files containing game data.")
parser.add_argument("-n", "--num-games", type=int,
help="Limit the number of games used (positive for first, negative for last")
parser.add_argument("--remove-suspect", action="store_true",
help="Filter out suspect games based on workerID.")
parser.add_argument("--no-error", action="store_true",
help="Filter out games that had bot errors.")
parser.add_argument("-r", "--ratings", required=True,
help="File with ratings of players.")
parser.add_argument("--subjects",
help="File with players to include.")
parser.add_argument("--subjects-num", type=int,
help="Only use first n subjects.")
parser.add_argument("--calc-best", action="store_true",
help="Calculate best possible rates using true win percentages.")
parser.add_argument("--type", choices=["ts", "wl"],
help="Type of ratings, ts=trueskill or wl=Weng-Lin.")
config = parser.parse_args(args)
with open(config.ratings) as rfile:
line = rfile.readline()
fnum = len(line.split(","))
if fnum == 3:
load_ratings = load_pl_ratings
winp = pl_winp
rank_order = pl_order
print("Detected plackett-luce ratings.")
elif fnum == 5:
load_ratings = load_ts_ratings
if not config.type:
print("Rating type not given, use --type argument.")
return
if config.type == "ts":
winp = ts_winp
rank_order = ts_order
print("Detected trueskill ratings.")
elif config.type == "wl":
winp = wl_winp
rank_order = ts_order
print("Detected Weng-Lin ratings.")
ratings = load_ratings(config.ratings)
print("Loaded ratings for %d players." % (len(ratings)))
if config.subjects:
with open(config.subjects) as sfile:
slines = sfile.readlines()
if len(slines[0].split(",")) > 1:
slines = [l.split(",")[1] for l in slines]
if config.subjects_num:
if config.subjects_num > 0:
slines = slines[:config.subjects_num]
else:
slines = slines[config.subjects_num:]
subjects = frozenset(l.strip() for l in slines)
print("Restricting stats to %d players" % (len(subjects),))
else:
subjects = None
games = utility.load_games(config.game_files)
if config.no_error:
games = utility.filter_error_games(games)
print("Filtered out error games, leaving %d" % (len(games),))
if config.remove_suspect:
start_num = len(games)
games = utility.filter_suspect_games(games)
print("Filtered out %d suspect games, leaving %d" % (
start_num - len(games), len(games)))
game_results = [{"%s (%s)" % (u['username'], u['userID']): int(u['rank'])
for u in g['users']}
for g in games]
if config.num_games:
if config.num_games > 0:
game |
lancepants/notes | lern/fibonacci.py | Python | gpl-3.0 | 857 | 0.015169 | #!/usr/bin/python3
'''
The fibonacci series is a series of numbers in which each number (Fibonacci
number) is the sum of the two preceding numbers. The simplest is the series 1,
1, 2, 3, 5, 8, 13, 21 etc.
It's a common example used when learning recursion. Its inefficiency also leads
to an alternate, iterative, "dynamic programming" memoization solution.
'''
# Using recursion
def fib(n):
    """Return the n-th Fibonacci number (series 1, 1, 2, 3, 5, ...)."""
    return 1 if n < 2 else fib(n - 1) + fib(n - 2)
# Instead of recursion, iterate by populating a table using the previous two
# values in said table.
def fibdp(n):
    """Return the first n Fibonacci numbers (1, 1, 2, 3, 5, ...).

    Iterative "dynamic programming" version: each number is built from the
    previous two, so the whole series costs O(n) instead of the exponential
    time of the naive recursion.

    Bug fix: the original always seeded two base cases, so fibdp(0) and
    fibdp(1) wrongly produced two values; n is now respected exactly.
    Returns a list rather than a dict view, which iterates identically for
    existing callers.
    """
    series = []
    current, following = 1, 1
    for _ in range(n):
        series.append(current)
        current, following = following, current + following
    return series
# See which of these takes longer
# NOTE(review): the naive recursion is exponential-time, so fib(39) alone
# dominates this loop, while the iterative fibdp(40) finishes instantly.
for i in range(1,40):
    print(fib(i))
fibdp(40)
|
rohitranjan1991/home-assistant | tests/components/zwave/test_light.py | Python | mit | 16,201 | 0.000864 | """Test Z-Wave lights."""
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.components import zwave
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_TRANSITION,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
SUPPORT_TRANSITION,
)
from homeassistant.components.zwave import const, light
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
# Integration is disabled
pytest.skip("Integration has been disabled in the manifest", allow_module_level=True)
class MockLightValues(MockEntityValues):
    """Mock Z-Wave light values."""
    def __init__(self, **kwargs):
        """Initialize the mock zwave values."""
        # Optional Z-Wave value objects a light can expose; individual
        # tests populate only the ones their scenario needs.
        self.dimming_duration = None
        self.color = None
        self.color_channels = None
        super().__init__(**kwargs)
def test_get_device_detects_dimmer(mock_openzwave):
    """Test get_device returns a normal dimmer."""
    # A node without the SWITCH_COLOR command class should map to a plain
    # brightness-only dimmer entity with no extra features.
    node = MockNode()
    value = MockValue(data=0, node=node)
    values = MockLightValues(primary=value)

    device = light.get_device(node=node, values=values, node_config={})
    assert isinstance(device, light.ZwaveDimmer)
    assert device.color_mode == COLOR_MODE_BRIGHTNESS
    assert device.supported_features == 0
    assert device.supported_color_modes == {COLOR_MODE_BRIGHTNESS}
def test_get_device_detects_colorlight(mock_openzwave):
"""Test get_device returns a color light."""
node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
value = MockValue(data=0, node=node)
values = MockLightValues(primary=value)
device = light.get_device(node=node, values=values, node_config={})
assert isinstance(device, light.ZwaveColorLight)
assert device.color_mode == COLOR_MODE_RGB
assert device.supported_features == 0
assert device.supported_color_modes == {COLOR_MODE_RGB}
def test_get_device_detects_zw098(mock_openzwave):
"""Test get_device returns a zw098 color light."""
node = MockNode(
manufacturer_id="0086",
product_id="0062",
command_classes=[const.COMMAND_CLASS_SWITCH_COLOR],
)
value = MockValue(data=0, node=node)
values = MockLightValues(primary=value)
device = light.get_device(node=node, values=values, node_config={})
assert isinstance(device, light.ZwaveColorLight)
assert device.color_mode == COLOR_MODE_RGB
assert device.supported_features == 0
assert device.supported_color_modes == {CO | LOR_MODE_COLOR_TEMP, COLOR_MODE_RGB}
def test_get_device_detects_rgbw_light(mock_openzwave):
"""Test get_device returns a color light."""
node = MockNode(command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
value = MockVa | lue(data=0, node=node)
color = MockValue(data="#0000000000", node=node)
color_channels = MockValue(data=0x1D, node=node)
values = MockLightValues(primary=value, color=color, color_channels=color_channels)
device = light.get_device(node=node, values=values, node_config={})
device.value_added()
assert isinstance(device, light.ZwaveColorLight)
assert device.color_mode == COLOR_MODE_RGBW
assert device.supported_features == 0
assert device.supported_color_modes == {COLOR_MODE_RGBW}
def test_dimmer_turn_on(mock_openzwave):
"""Test turning on a dimmable Z-Wave light."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockLightValues(primary=value)
device = light.get_device(node=node, values=values, node_config={})
device.turn_on()
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 255
node.reset_mock()
device.turn_on(**{ATTR_BRIGHTNESS: 224})
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 87 # round(224 / 255 * 99)
node.reset_mock()
device.turn_on(**{ATTR_BRIGHTNESS: 120})
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 47 # round(120 / 255 * 99)
with patch.object(light, "_LOGGER", MagicMock()) as mock_logger:
device.turn_on(**{ATTR_TRANSITION: 35})
assert mock_logger.debug.called
assert node.set_dimmer.called
msg, entity_id = mock_logger.debug.mock_calls[0][1]
assert entity_id == device.entity_id
def test_dimmer_min_brightness(mock_openzwave):
"""Test turning on a dimmable Z-Wave light to its minimum brightness."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockLightValues(primary=value)
device = light.get_device(node=node, values=values, node_config={})
assert not device.is_on
device.turn_on(**{ATTR_BRIGHTNESS: 1})
assert device.is_on
assert device.brightness == 1
device.turn_on(**{ATTR_BRIGHTNESS: 0})
assert device.is_on
assert device.brightness == 0
def test_dimmer_transitions(mock_openzwave):
"""Test dimming transition on a dimmable Z-Wave light."""
node = MockNode()
value = MockValue(data=0, node=node)
duration = MockValue(data=0, node=node)
values = MockLightValues(primary=value, dimming_duration=duration)
device = light.get_device(node=node, values=values, node_config={})
assert device.color_mode == COLOR_MODE_BRIGHTNESS
assert device.supported_features == SUPPORT_TRANSITION
assert device.supported_color_modes == {COLOR_MODE_BRIGHTNESS}
# Test turn_on
# Factory Default
device.turn_on()
assert duration.data == 0xFF
# Seconds transition
device.turn_on(**{ATTR_TRANSITION: 45})
assert duration.data == 45
# Minutes transition
device.turn_on(**{ATTR_TRANSITION: 245})
assert duration.data == 0x83
# Clipped transition
device.turn_on(**{ATTR_TRANSITION: 10000})
assert duration.data == 0xFE
# Test turn_off
# Factory Default
device.turn_off()
assert duration.data == 0xFF
# Seconds transition
device.turn_off(**{ATTR_TRANSITION: 45})
assert duration.data == 45
# Minutes transition
device.turn_off(**{ATTR_TRANSITION: 245})
assert duration.data == 0x83
# Clipped transition
device.turn_off(**{ATTR_TRANSITION: 10000})
assert duration.data == 0xFE
def test_dimmer_turn_off(mock_openzwave):
"""Test turning off a dimmable Z-Wave light."""
node = MockNode()
value = MockValue(data=46, node=node)
values = MockLightValues(primary=value)
device = light.get_device(node=node, values=values, node_config={})
device.turn_off()
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 0
def test_dimmer_value_changed(mock_openzwave):
"""Test value changed for dimmer lights."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockLightValues(primary=value)
device = light.get_device(node=node, values=values, node_config={})
assert not device.is_on
value.data = 46
value_changed(value)
assert device.is_on
assert device.brightness == 118
def test_dimmer_refresh_value(mock_openzwave):
"""Test value changed for dimmer lights."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockLightValues(primary=value)
device = light.get_device(
node=node,
values=values,
node_config={zwave.CONF_REFRESH_VALUE: True, zwave.CONF_REFRESH_DELAY: 5},
)
assert not device.is_on
with patch.object(light, "Timer") as mock_timer:
value.data = 46
value_changed(value)
assert not device.is_on
assert mock_timer.called
assert len(mock_timer.mock_calls) == 2
timeout, callback = mock_timer.mock_calls[0][1][:2]
assert timeout == 5
assert mock_timer().start.called
assert len(mock_timer().start.mock_calls) == 1
|
jonaustin/advisoryscan | django/tests/regressiontests/fixtures_regress/models.py | Python | mit | 854 | 0.008197 | from django.db import models
class Animal(models.Model):
    # NOTE: "maxlength" is the pre-Django-1.0 spelling of "max_length",
    # kept for consistency with the rest of this legacy test suite.
    name = models.CharField(maxlength=150)
    latin_name = models.CharField(maxlength=150)

    def __str__(self):
        # Bug fix: the model defines no "common_name" field, so the
        # original `return self.common_name` raised AttributeError.
        return self.name
class Plant(models.Model):
    name = models.CharField(maxlength=150)
    class Meta:
        # For testing an upper-case letter in the app name; regression
        # test for #4057.
        db_table = "Fixtures_regress_plant"
__test__ = {'API_TESTS':"""
>>> from django.core import management
# Load a fixture that uses PK=1
>>> management.load_data(['sequence'], verbosity=0)
# Create a new animal. Without a sequence reset, this new object
# will take a PK of 1 (on Postgres), and the save will fail.
# This is a regression test for ticket #3790.
>>> animal = Animal(name='Platypus', latin_name='Ornithorhynchus anatinus')
>>> animal.save()
"""} |
edgedb/edgedb | edb/schema/links.py | Python | apache-2.0 | 21,887 | 0 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb import errors
from . import abc as s_abc
from . import constraints
from . import delta as sd
from . import indexes
from . import inheriting
from . import properties
from . import name as sn
from . import objects as so
from . import pointers
from . import referencing
from . import sources
from . import utils
if TYPE_CHECKING:
from . import objtypes as s_objtypes
from . import types as s_types
from . import schema as s_schema
LinkTargetDeleteAction = qltypes.LinkTargetDeleteAction
def merge_actions(
target: so.InheritingObject,
sources: List[so.Object],
field_name: str,
*,
ignore_local: bool = False,
schema: s_schema.Schema,
) -> Any:
if not ignore_local:
ours = target.get_explicit_local_field_value(schema, field_name, None)
else:
ours = None
if ours is None:
current = None
current_from = None
for source in sources:
theirs = source.get_explicit_field_value(schema, field_name, None)
if theirs is not None:
if current is None:
current = theirs
current_from = source
elif current != theirs:
target_source = target.get_source(schema)
current_from_source = current_from.get_source(schema)
source_source = source.get_source(schema)
tgt_repr = (
f'{target_source.get_displayname(schema)}.'
f'{target.get_displayname(schema)}'
)
cf_repr = (
f'{current_from_source.get_displayname(schema)}.'
f'{current_from.get_displayname(schema)}'
)
other_repr = (
f'{source_source.get_displayname(schema)}.'
f'{source.get_displayname(schema)}'
)
raise errors.SchemaError(
| f'cannot implicitly resolve the '
f'`on target delete` action for '
f'{tgt_repr!r}: it is defined as {current} in '
f'{cf_repr!r} and as {theirs} in {other_repr!r}; '
f'to resolve, declare `on target delete` '
f'explicitly on {tgt_ | repr!r}'
)
return current
else:
return ours
class Link(
sources.Source,
pointers.Pointer,
s_abc.Link,
qlkind=qltypes.SchemaObjectClass.LINK,
data_safe=False,
):
on_target_delete = so.SchemaField(
LinkTargetDeleteAction,
default=LinkTargetDeleteAction.Restrict,
coerce=True,
compcoef=0.9,
merge_fn=merge_actions)
def get_target(self, schema: s_schema.Schema) -> s_objtypes.ObjectType:
return self.get_field_value( # type: ignore[no-any-return]
schema, 'target')
def is_link_property(self, schema: s_schema.Schema) -> bool:
return False
def is_property(self, schema: s_schema.Schema) -> bool:
return False
def scalar(self) -> bool:
return False
def has_user_defined_properties(self, schema: s_schema.Schema) -> bool:
return bool([p for p in self.get_pointers(schema).objects(schema)
if not p.is_special_pointer(schema)])
def get_source_type(
self,
schema: s_schema.Schema
) -> s_types.Type:
from . import types as s_types
source = self.get_source(schema)
assert isinstance(source, s_types.Type)
return source
def compare(
self,
other: so.Object,
*,
our_schema: s_schema.Schema,
their_schema: s_schema.Schema,
context: so.ComparisonContext,
) -> float:
if not isinstance(other, Link):
if isinstance(other, pointers.Pointer):
return 0.0
else:
raise NotImplementedError()
return super().compare(
other, our_schema=our_schema,
their_schema=their_schema, context=context)
def set_target(
self,
schema: s_schema.Schema,
target: s_types.Type,
) -> s_schema.Schema:
schema = super().set_target(schema, target)
tgt_prop = self.getptr(schema, sn.UnqualName('target'))
schema = tgt_prop.set_target(schema, target)
return schema
@classmethod
def get_root_classes(cls) -> Tuple[sn.QualName, ...]:
return (
sn.QualName(module='std', name='link'),
sn.QualName(module='schema', name='__type__'),
)
@classmethod
def get_default_base_name(self) -> sn.QualName:
return sn.QualName('std', 'link')
class LinkSourceCommandContext(sources.SourceCommandContext):
pass
class LinkSourceCommand(inheriting.InheritingObjectCommand[sources.Source_T]):
pass
class LinkCommandContext(pointers.PointerCommandContext[Link],
constraints.ConsistencySubjectCommandContext,
properties.PropertySourceContext,
indexes.IndexSourceCommandContext):
pass
class LinkCommand(
properties.PropertySourceCommand[Link],
pointers.PointerCommand[Link],
context_class=LinkCommandContext,
referrer_context_class=LinkSourceCommandContext,
):
def _append_subcmd_ast(
self,
schema: s_schema.Schema,
node: qlast.DDLOperation,
subcmd: sd.Command,
context: sd.CommandContext,
) -> None:
if (
isinstance(subcmd, pointers.PointerCommand)
and subcmd.classname != self.classname
):
pname = sn.shortname_from_fullname(subcmd.classname)
if pname.name in {'source', 'target'}:
return
super()._append_subcmd_ast(schema, node, subcmd, context)
def validate_object(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
"""Check that link definition is sound."""
super().validate_object(schema, context)
scls = self.scls
assert isinstance(scls, Link)
if not scls.get_owned(schema):
return
target = scls.get_target(schema)
assert target is not None
if not target.is_object_type():
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link target type, expected object type, got '
f'{target.get_verbosename(schema)}',
context=srcctx,
)
if target.is_free_object_type(schema):
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'{target.get_verbosename(schema)} is not a valid link target',
context=srcctx,
)
if (
not scls.is_pure_computable(schema)
and not scls.get_from_alias(schema)
and target.is_view(schema)
):
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link type: {target.get_displayname(schema)!r}'
f' is an expression |
BFriedland/RPi-HAUS | haus_site/api/serializers.py | Python | lgpl-3.0 | 3,890 | 0.001799 | from django.contrib.auth.models import User, Group
from rest_framework import serializers
from haus.models import Device, Atom, Data, CurrentData
class AtomSerializer(serializers.ModelSerializer):
class Meta:
model = Atom
def restore_object(self, attrs, instance=None):
print("attrs == " + str(attrs))
# print str(instance)
if instance:
instance.atom_name = attrs.get('atom_name', instance.atom_name)
instance.device = attrs.get('device', instance.device)
instance.save()
return instance
return Atom(**attrs)
class DataSerializer(serializers.ModelSerializer):
atom_name = serializers.SerializerMethodField('get_atom_name')
def get_atom_name(self, obj):
return obj.atom.atom_name
class Meta:
model = Data
# def restore_object(self, attrs, instance=None):
# # Instance should never exist when submitting data through the API.
# # if instance:
# # instance.atom = attrs.get('atom', instance.atom)
# # instance.value = attrs.get('value', instance.value)
# # instance.timestamp = attrs.get('timestamp', instance.timestamp)
# # instance.save()
# # return instance
# print(str(attrs))
# return Data(**attrs)
class CurrentDataSerializer(serializers.ModelSerializer):
atom_name = serializers.SerializerMethodField('get_atom_name')
def get_atom_name(self, obj):
return obj.atom.atom_name
class Meta:
model = CurrentData
def restore_object(self, attrs, instance=None):
if instance:
# For current data, the atom will never
# change once it has been assigned.
# instance.atom = attrs.get('atom', instance.atom)
instance.value = attrs.get('value', instance.value)
instance.timestamp = attrs.get('timestamp', instance.timestamp)
instance.save()
return instance
print(str(attrs))
return CurrentData(**attrs)
# class DataSerializer(serializers.ModelSerializer):
# class Meta:
# model = Data
class DeviceSerializer(serializers.ModelSerializer):
# id = serializers.Field()
# name = serializers.CharField(max_length=200)
atoms = serializers.SerializerMethodField('get_atoms')
# NOTE: Devices currently only have one user.
# If this changes, see also the models.py file and views.py file.
# ForeignKey for serializers is RelatedField. Reference:
# http://www.django-rest-framework.org/api-guide/relations/
# user = serializers.PrimaryKeyRelatedField()
# user = serializers.RelatedField()
# 'monitor' or 'controller'
# device_type = serializers.CharField(max_length=20)
# serialpath might not be on Devices -- maybe move to AtomSerializer
# depending on what the model turns out to be?
# serialpath = serializers.CharField(max_length=200)
# user = serializers.SerializerMethodField('get_user_id')
# def get_user_id(self, obj):
# return obj.user.pk
class | Meta:
model = Device
def get_atoms(self, obj):
return {atom.atom_name: atom.pk for atom in obj.atoms.all()}
# Requires importing the models (so you can creat | e a new entry in the DB)
def restore_object(self, attrs, instance=None):
print("attrs == " + str(attrs))
print str(instance)
if instance:
self.was_created = False
instance.device_name = attrs.get('device_name', instance.device_name)
instance.user = attrs.get('user_id', instance.user)
instance.device_type = attrs.get('device_type', instance.device_type)
instance.save()
return instance
self.was_created = True
return Device.create(**attrs)
|
glogiotatidis/mozillians-new | vendor-local/lib/python/south/creator/changes.py | Python | bsd-3-clause | 24,279 | 0.004078 | """
Contains things to detect changes - either using options passed in on the
commandline, or by using autodetection, etc.
"""
from __future__ import print_function
from django.db import models
from django.contrib.contenttypes.generic import GenericRelation
from django.utils.datastructures import SortedDict
from south.creator.freezer import remove_useless_attributes, freeze_apps, model_key
from south.utils import auto_through
from south.utils.py3 import string_types
class BaseChanges(object):
"""
Base changes class.
"""
def suggest_name(self):
return ''
def split_model_def(self, model, model_def):
"""
Given a model and its model def (a dict of field: triple), returns three
items: the real fields dict, the Meta dict, and the M2M fields dict.
"""
real_fields = SortedDict()
meta = SortedDict()
m2m_fields = SortedDict()
for name, triple in model_def.items():
if name == "Meta":
meta = triple
elif isinstance(model._meta.get_field_by_name(name)[0], models.ManyToManyField):
m2m_fields[name] = triple
else:
real_fields[name] = triple
return real_fields, meta, m2m_fields
def current_model_from_key(self, key):
app_label, model_name = key.split(".")
return models.get_model(app_label, model_name)
def current_field_from_key(self, key, fieldname):
app_label, model_name = key.split(".")
# Special, for the magical field from order_with_respect_to
if fieldname == "_order":
field = models.IntegerField()
field.name = "_order"
field.attname = "_order"
field.column = "_order"
field.default = 0
return field
# Otherwise, normal.
return models.get_model(app_label, model_name)._meta.get_field_by_name(fieldname)[0]
class AutoChanges(BaseChanges):
"""
Detects changes by 'diffing' two sets of frozen model definitions.
"""
# Field types we don't generate add/remove field changes for.
IGNORED_FIELD_TYPES = [
GenericRelation,
]
def __init__(self, migrations, old_defs, old_orm, new_defs):
self.migrations = migrations
self.old_defs = old_defs
self.old_orm = old_orm
self.new_defs = new_defs
def suggest_name(self):
parts = ["auto"]
for change_name, params in self.get_changes():
if change_name == "AddModel":
parts.append("add_%s" % params['model']._meta.object_name.lower())
elif change_name == "DeleteModel":
parts.append("del_%s" % params['model']._meta.object_name.lower())
elif change_name == "AddField":
parts.append("add_field_%s_%s" % (
params['model']._meta.object_name.lower(),
params['field'].name,
))
elif change_name == "DeleteField":
parts.append("del_field_%s_%s" % (
params['model']._meta.object_name.lower(),
params['field'].name,
))
elif change_name == "ChangeField":
parts.append("chg_field_%s_%s" % (
params['model']._meta.object_name.lower(),
params['new_field'].name,
| ))
elif change_name == "Ad | dUnique":
parts.append("add_unique_%s_%s" % (
params['model']._meta.object_name.lower(),
"_".join([x.name for x in params['fields']]),
))
elif change_name == "DeleteUnique":
parts.append("del_unique_%s_%s" % (
params['model']._meta.object_name.lower(),
"_".join([x.name for x in params['fields']]),
))
elif change_name == "AddIndex":
parts.append("add_index_%s_%s" % (
params['model']._meta.object_name.lower(),
"_".join([x.name for x in params['fields']]),
))
elif change_name == "DeleteIndex":
parts.append("del_index_%s_%s" % (
params['model']._meta.object_name.lower(),
"_".join([x.name for x in params['fields']]),
))
return ("__".join(parts))[:70]
def get_changes(self):
"""
Returns the difference between the old and new sets of models as a 5-tuple:
added_models, deleted_models, added_fields, deleted_fields, changed_fields
"""
deleted_models = set()
# See if anything's vanished
for key in self.old_defs:
if key not in self.new_defs:
# We shouldn't delete it if it was managed=False
old_fields, old_meta, old_m2ms = self.split_model_def(self.old_orm[key], self.old_defs[key])
if old_meta.get("managed", "True") != "False":
# Alright, delete it.
yield ("DeleteModel", {
"model": self.old_orm[key],
"model_def": old_fields,
})
# Also make sure we delete any M2Ms it had.
for fieldname in old_m2ms:
# Only delete its stuff if it wasn't a through=.
field = self.old_orm[key + ":" + fieldname]
if auto_through(field):
yield ("DeleteM2M", {"model": self.old_orm[key], "field": field})
# And any index/uniqueness constraints it had
for attr, operation in (("unique_together", "DeleteUnique"), ("index_together", "DeleteIndex")):
together = eval(old_meta.get(attr, "[]"))
if together:
# If it's only a single tuple, make it into the longer one
if isinstance(together[0], string_types):
together = [together]
# For each combination, make an action for it
for fields in together:
yield (operation, {
"model": self.old_orm[key],
"fields": [self.old_orm[key]._meta.get_field_by_name(x)[0] for x in fields],
})
# We always add it in here so we ignore it later
deleted_models.add(key)
# Or appeared
for key in self.new_defs:
if key not in self.old_defs:
# We shouldn't add it if it's managed=False
new_fields, new_meta, new_m2ms = self.split_model_def(self.current_model_from_key(key), self.new_defs[key])
if new_meta.get("managed", "True") != "False":
yield ("AddModel", {
"model": self.current_model_from_key(key),
"model_def": new_fields,
})
# Also make sure we add any M2Ms it has.
for fieldname in new_m2ms:
# Only create its stuff if it wasn't a through=.
field = self.current_field_from_key(key, fieldname)
if auto_through(field):
yield ("AddM2M", {"model": self.current_model_from_key(key), "field": field})
# And any index/uniqueness constraints it has
for attr, operation in (("unique_together", "AddUnique"), ("index_together", "AddIndex")):
together = eval(new_meta.get(attr, "[]"))
if together:
# If it's only a single tuple, make it into the longer one
if isinstance(together[0], string_types):
together = [together]
# For each combination, make an a |
playpauseandstop/setman | testproject-django/testapp/forms.py | Python | bsd-3-clause | 783 | 0.002554 | from django import forms
from django.utils.translation import ugettext_lazy as _
__all__ = ('SandboxForm', )
class SandboxForm(forms.Form):
    """
    Simple form for the "Sandbox" page.

    Lets the user type the name of a settings entry and fetch its value;
    a small blacklist keeps sensitive settings from being exposed.
    """
    # Settings whose values must never be revealed through the sandbox.
    FORBIDDEN_SETTINGS = (
        'DATABASES', 'ODESK_PRIVATE_KEY', 'ODESK_PUBLIC_KEY', 'SECRET_KEY'
    )

    name = forms.CharField(label=_('Name'), required=True,
        help_text=_('Enter name of available setting, press Enter - get '
                    'setting value.'),
        widget=forms.TextInput(attrs={'size': 50}))

    def clean_name(self):
        """Reject names of settings whose values are forbidden."""
        name = self.cleaned_data['name']
        if name in self.FORBIDDEN_SETTINGS:
            raise forms.ValidationError(_(
                'The value for this setting is forbidden.'
            ))
        return name
|
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/zeitgeist/datamodel.py | Python | gpl-3.0 | 49 | 0.020408 | ../../../ | ../share/pyshared/zeitgeist/datamodel.p | y |
jarble/EngScript | libraries/factors.py | Python | mit | 136 | 0.022059 | def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range( | 1, int(n**0.5) + 1) if n % i == 0))) | |
lowitty/selenium | com/ericsson/xn/x/fm/TestCases/case_ltehss_fm.py | Python | mit | 891 | 0.013468 | '''
Created on Mar 1, 2016
@author: eyyylll
'''
import os
from com.ericsson.xn.x.fm.FmCommons.GuiDataFunc import check_alarm_data_accuracy
from com.ericsson.xn.commons.caseutils import pre_test_case, post_test_case
root_dir = os.path.normpath(os.path.dirname(os.path.abspath(__file__))).split('com' + os.sep + 'ericsson' + os.sep + 'xn' + os.sep + 'x' + os.sep + 'fm' + os.sep + 'TestCases')[0]
se | rver | _info_cfg = root_dir + "x" + os.sep + "pm" + os.sep + "execute_conf.cfg"
ne_info_cfg = root_dir + "x" + os.sep + "pm" + os.sep + "nes" + os.sep + "ltehss.cfg"
alarm_mapping_cfg = root_dir + "x" + os.sep + "fm" + os.sep + "gui_mapping" + os.sep + "hss.cfg"
def check_ltehss_alarm_accuracy():
pre_test_case("check_ltehss_gui_accuracy_case","gui_fm_accuracy")
check_alarm_data_accuracy(ne_info_cfg,server_info_cfg,alarm_mapping_cfg)
post_test_case()
|
scigghia/l10n-italy | l10n_it_ricevute_bancarie/__openerp__.py | Python | agpl-3.0 | 2,231 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: info@andreacometa.it
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Fou | ndation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it w | ill be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Ricevute Bancarie",
'version': "8.0.1.3.0",
'author': "Odoo Community Association (OCA)",
'category': "Accounting & Finance",
'website': "http://www.odoo-italia.org",
'license': "AGPL-3",
'depends': [
'account_voucher',
'l10n_it_fiscalcode',
'account_due_list',
'base_iban'],
'data': [
"views/partner_view.xml",
"views/configuration_view.xml",
"riba_sequence.xml",
"views/wizard_accreditation.xml",
"views/wizard_unsolved.xml",
"views/riba_view.xml",
"views/account_view.xml",
"views/wizard_riba_issue.xml",
"views/wizard_riba_file_export.xml",
"views/account_config_view.xml",
"riba_workflow.xml",
"security/ir.model.access.csv",
],
'images': [],
'demo': ["demo/riba_demo.xml"],
'test': [
'test/riba_invoice.yml',
'test/issue_riba.yml',
'test/unsolved_riba.yml',
],
'installable': True,
}
|
jayvdb/flake8-putty | tests/__init__.py | Python | mit | 44 | 0 | # -*- coding: utf-8 -*-
"" | "Test package."""
| |
nathanbjenx/cairis | cairis/gui/RolesDialog.py | Python | apache-2.0 | 2,882 | 0.028799 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U | nless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES | OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from cairis.core.Role import Role
from RoleDialog import RoleDialog
from DialogClassParameters import DialogClassParameters
from cairis.core.ARM import *
from DimensionBaseDialog import DimensionBaseDialog
__author__ = 'Shamal Faily'
class RolesDialog(DimensionBaseDialog):
def __init__(self,parent):
DimensionBaseDialog.__init__(self,parent,ROLES_ID,'Roles',(800,300),'role.png')
idList = [ROLES_LISTROLES_ID,ROLES_BUTTONADD_ID,ROLES_BUTTONDELETE_ID]
columnList = ['Name','Short Code','Type']
self.buildControls(idList,columnList,self.dbProxy.getRoles,'role')
listCtrl = self.FindWindowById(ROLES_LISTROLES_ID)
listCtrl.SetColumnWidth(0,150)
listCtrl.SetColumnWidth(1,100)
listCtrl.SetColumnWidth(2,400)
def addObjectRow(self,listCtrl,listRow,role):
listCtrl.InsertStringItem(listRow,role.name())
listCtrl.SetStringItem(listRow,1,role.shortCode())
listCtrl.SetStringItem(listRow,2,role.type())
def onAdd(self,evt):
try:
addParameters = DialogClassParameters(ROLE_ID,'Add role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.addRole,True)
self.addObject(addParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
selectedObjt = self.objts[self.selectedLabel]
try:
updateParameters = DialogClassParameters(ROLE_ID,'Edit role',RoleDialog,ROLE_BUTTONCOMMIT_ID,self.dbProxy.updateRole,False)
self.updateObject(selectedObjt,updateParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onDelete(self,evt):
try:
self.deleteObject('No role','Delete role',self.dbProxy.deleteRole)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete role',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
|
roadmapper/ansible | lib/ansible/modules/cloud/vmware/vmware_export_ovf.py | Python | gpl-3.0 | 15,634 | 0.00339 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_export_ovf
short_description: Exports a VMware virtual machine to an OVF file, device files and a manifest file
description: >
This module can be used to export a VMware virtual machine to OVF template from vCenter server or ESXi host.
version_added: '2.8'
author:
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
requirements:
- python >= 2.6
- PyVmomi
notes: []
options:
name:
description:
- Name of the virtual machine to export.
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- Uuid of the virtual machine to export.
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
version_added: '2.9'
type: str
datacenter:
default: ha-datacenter
description:
- Datacenter name of the virtual machine to export.
- This parameter is case sensitive.
type: str
folder:
description:
- Destination folder, absolute path to find the specified guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
- 'If multiple machines are found with same name, this parameter is used to identify
uniqueness of the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
export_dir:
description:
- Absolute path to place the exported files on the server running this task, must have write permission.
- If folder not exist will create it, also create a folder under this path named with VM name.
required: yes
type: path
export_with_images:
default: false
description:
- Export an ISO image of the media mounted on the CD/DVD Drive within the virtual machine.
type: bool
download_timeout:
description:
- The user defined timeout in second of exporting file.
- If the vmdk file is too large, you can increase the value.
default: 30
type: int
version_added: '2.9'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- vmware_export_ovf:
validate_certs: false
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
name: '{{ vm_name }}'
export_with_images: true
export_dir: /path/to/ovf_template/
delegate_to: localhost
'''
RETURN = r'''
instance:
description: list of the exported files, if exported from vCenter server, device file is not named with vm name
returned: always
type: dict
sample: None
'''
import os
import hashlib
from time import sleep
from threading import Thread
from ansible.module_utils.urls import open_url
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
try:
from pyVmomi import vim
from pyVim import connect
except ImportError:
pass
class LeaseProgressUpdater(Thread):
def __init__(self, http_nfc_lease, update_interval):
Thread.__init__(self)
self._running = True
self.httpNfcLease = http_nfc_lease
self.updateInterval = update_interval
self.progressPercent = 0
def set_progress_percent(self, progress_percent):
self.progressPercent = progress_percent
def stop(self):
self._running = False
def run(self):
while self._running:
try:
if self.httpNfcLease.state == vim.HttpNfcLease.State.done:
return
self.httpNfcLease.HttpNfcLeaseProgress(self.progressPercent)
sleep_sec = 0
while True:
if self.httpNfcLease.state == vim.HttpNfcLease.State.done or self.httpNfcLease.state == vim.HttpNfcLease.State.error:
return
sleep_sec += 1
sleep(1)
if sleep_sec == self.updateInterval:
break
except Exception:
return
class VMwareExportVmOvf(PyVmomi):
def __init__(self, module):
super(VMwareExportVmOvf, self).__init__(module | )
self.mf_file = ''
self.ovf_dir = ''
# set read device content chunk size to 2 MB
self.chunk_size = 2 * 2 ** 20
# set lease progress update interval to 15 seconds
self.lease_interval = 15
self.facts = {'device_files': []}
self.download_timeout = None
def create_export_dir(self, vm_obj):
self.ovf_dir = os.path.join(self.params['export_dir'], vm_obj.name)
if not os.path.exists(self.ovf_dir):
| try:
os.makedirs(self.ovf_dir)
except OSError as err:
self.module.fail_json(msg='Exception caught when create folder %s, with error %s'
% (self.ovf_dir, to_text(err)))
self.mf_file = os.path.join(self.ovf_dir, vm_obj.name + '.mf')
def download_device_files(self, headers, temp_target_disk, device_url, lease_updater, total_bytes_written,
total_bytes_to_write):
mf_content = 'SHA256(' + os.path.basename(temp_target_disk) + ')= '
sha256_hash = hashlib.sha256()
response = None
with open(self.mf_file, 'a') as mf_handle:
with open(temp_target_disk, 'wb') as handle:
try:
response = open_url(device_url, headers=headers, validate_certs=False, timeout=self.download_timeout)
except Exception as err:
lease_updater.httpNfcLease.HttpNfcLeaseAbort()
lease_updater.stop()
self.module.fail_json(msg='Exception caught when getting %s, %s' % (device_url, to_text(err)))
if not response:
lease_updater.httpNfcLease.HttpNfcLeaseAbort()
lease_updater.stop()
self.module.fail_json(msg='Getting %s failed' % device_url)
if response.getcode() >= 400:
lease_updater.httpNfcLease.HttpNfcLeaseAbort()
lease_updater.stop()
self.module.fail_json(msg='Getting %s return code %d' % (device_url, response.getcode()))
current_bytes_written = 0
block = response.read(self.chunk_size)
while block:
handle.write(block)
sha256_hash.update(block)
handle.flush()
os.fsync(handle.fileno())
current_bytes_written += len(block)
block = response.read(self.chunk_size)
written_percent = ((current_bytes_written + total_bytes_written) * 100) / total_bytes_to_write
lease_updater.progressPercent = int(written_percent)
mf_handle.write(mf_content + sha256_hash.hexdigest() + '\n')
self.facts['device_files'].append(temp_target_disk)
return current_bytes_written
def export_to_ovf_files(self, vm_obj):
self.create_expo |
itkvideo/ITK | Wrapping/WrapITK/Languages/Python/Tests/SmoothingRecursiveGaussianImageFilter.py | Python | apache-2.0 | 1,074 | 0.011173 | #==========================================================================
#
# Copyright Insight Software Consort | ium
#
# Licensed under the Apache License, Version 2.0 (the "License") | ;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the SmoothingRecursiveGaussianImageFilter
#
import itk
from sys import argv
itk.auto_progress(2)
reader = itk.ImageFileReader.IUC2.New( FileName=argv[1] )
filter = itk.SmoothingRecursiveGaussianImageFilter.New( reader, Sigma=eval( argv[3] ) )
itk.write( filter, argv[2] )
|
NeuroRoboticTech/Jetduino | Software/Python/grove_sound_sensor.py | Python | mit | 2,624 | 0.003049 | #!/usr/bin/env python
#
# Jetduino Example for using the Grove Sound Sensor and the Grove LED
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Modules:
# http://www.seeedstudio.com/wiki/Grove_-_Sound_Sensor
# http://www.seeedstudio.com/wiki/Grove_-_LED_Socket_Kit
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES | OF MERCHAN | TABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Sound Sensor to analog port A0
# SIG,NC,VCC,GND
sound_sensor = ARD_A0
# Connect the Grove LED to digital port D5
# SIG,NC,VCC,GND
led = LED_D4
jetduino.pinMode(led, OUTPUT_PIN)
# The threshold to turn the led on 400.00 * 5 / 1024 = 1.95v
threshold_value = 600
while True:
try:
# Read the sound level
sensor_value = jetduino.analogRead(sound_sensor)
# If loud, illuminate LED, otherwise dim
if sensor_value > threshold_value:
jetduino.digitalWrite(led, HIGH)
else:
jetduino.digitalWrite(led, LOW)
print ("sensor_value =", sensor_value)
time.sleep(.5)
except IOError:
print ("Error")
|
allisnone/pytrade | position_history_update.py | Python | gpl-2.0 | 22,677 | 0.016749 | # -*- coding:utf-8 -*-
# !/usr/bin/env python
#import easytrader
import easyhistory
import pdSql_common as pds
from pdSql import StockSQL
import sys
import datetime
from pytrade_api import *
from multiprocessing import Pool
import os, time
import file_config as fc
import code
stock_sql_obj=StockSQL(sqlite_file='pytrader.db',sqltype='sqlite',is_today_update=True)
CHINESE_DICT = stock_sql_obj.get_code_to_name()
def seprate_list(all_codes,seprate_num=4):
"""
分割股票池
"""
#all_codes = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
c = len(all_codes)
sub_c = int(c/seprate_num)
code_list_dict = {}
for j in range(seprate_num-1):
code_list_dict[j] = all_codes[j*sub_c:(j+1)*sub_c]
code_list_dict[j+1] = all_codes[(j+1)*sub_c:]
return code_list_dict
def update_yh_hist_data(all_codes,process_id,latest_date_str):
"""
更新历史数据,单个CPU
"""
all_count = len(all_codes)
print('processor %s: all_count=%s '% (process_id,all_count))
if all_count<=0:
print('processor %s: empty list'% process_id)
return
else:
print('processor %s start'%process_id)
latest_count = 0
count = 0
pc0=0
#print('all_codes=',all_codes)
for code in all_codes:
#print('code=',code)
df,has_tdx_last_string = pds.get_yh_raw_hist_df(code,latest_count=None)
pc = round(round(count,2)/all_count,2)*100
if pc>pc0:
#print('count=',count)
print('processor %s 完成数据更新百分之%s' % (process_id,pc))
pc0 = pc
if len(df)>=1:
last_code_trade_date = df.tail(1).iloc[0].date
if last_code_trade_date==latest_date_str:
latest_count = latest_count + 1
#time.sleep(0.2)
count = count + 1
latest_update_rate =round(round(latest_count,2)/all_count,2)
print('latest_update_rate_processor_%s=%s'%(process_id,latest_update_rate))
return
def update_one_stock_k_data(code):
df,has_tdx_last_string = pds.get_yh_raw_hist_df(code,latest_count=None)
return
def multiprocess_update_k_data0(code_list_dict,update_type='yh'):
"""
多进程更新历史数据,apply_async方法
存在问题:数据分片丢失
"""
#code_list_dict = seprate_list(all_codes,4)
#print('code_list_dict=',code_list_dict)
print('Parent process %s.' % os.getpid())
processor_num=len(code_list_dict)
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool()
for i in range(processor_num):
p.apply_async(update_yh_hist_data, args=(code_list_dict[i],i,last_date_str,))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
return
def multiprocess_update_k_data(allcodes,update_type='yh',pool_num=10):
"""
多进程更新历史数据,map方法
"""
#code_list_dict = seprate_list(all_codes,4)
#print('code_list_dict=',code_list_dict)
print('Parent process %s, multiprocess_num=%s.' % (os.getpid(),pool_num))
processor_num=len(allcodes)
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool(pool_num)
p.map(update_one_stock_k_data,allcodes)
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
return
def update_k_data(update_type='yh'):
stock_sql = StockSQL()
hold_df,hold_stocks,available_sells = stock_sql.get_hold_stocks(accounts = ['36005', '38736'])
print('hold_stocks=',hold_stocks)
print('available_sells=',available_sells)
#pds.get_exit_price(hold_codes=['002521'],data_path='C:/中国银河证券海王星/T0002/export/' )
#print(hold_df)
"""从新浪 qq网页更新股票"""
#easyhistory.init(path="C:/hist",stock_codes=hold_stocks)
#easyhistory.update(path="C:/hist",stock_codes=hold_stocks)
"""从银河更新股票"""
#for stock in hold_stocks:
#pds.update_one_stock(symbol=stock,realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=False)
# pass
#stock_sql.update_sql_position(users={'account':'36005','broker':'yh','json':'yh.json'})
#stock_sql.update_sql_position(users={'account':'38736','broker':'yh','json':'yh1.json'})
#hold_df,hold_stocks,available_sells = stock_sql.get_hold_stocks(accounts = ['36005', '38736'])
#print('hold_stocks=',hold_stocks)
#print(hold_df)
#pds.update_one_stock(symbol='sh',force_update=False)
#pds.update_codes_from_YH(realtime_update=False)
"""从银河更新指数"""
#pds.update_codes_from_YH(realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
#pds.update_codes_from_YH(realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
#indexs = ['zxb', 'sh50', 'hs300', 'sz300', 'cyb', 'sz', 'zx300', 'sh']
"""
potential_df = stock_sql.query_data(table='potential',fields='category_id,code,valid,name',condition='valid>=1')
print(potential_df)
lanchou_df = potential_df[potential_df['category_id']==1]
print(lanchou_df['code'].values.tolist())
"""
#"""
last_date_str = pds.tt.get_last_trade_date(date_format='%Y/%m/%d')
latest_date_str = pds.tt.get_latest_trade_date(date_format='%Y/%m/%d')
next_date_str = pds.tt.get_next_trade_date(date_format='%Y/%m/%d')
print('last_date = ',last_date_str)
print('latest_date_str=',latest_date_str)
print('next_date_str=',next_date_str)
indexs,funds,b_stock,all_stocks = pds.get_different_symbols()
if update_type == 'index':
#从银河更新指数
#stock_sql.update_sql_index(index_list=['sh','sz','zxb','cyb','hs300','sh50'],force_update=False)
#stock_sql.download_hist_as_csv(indexs = ['sh','sz','zxb','cyb','hs300','sh50'],dir='C:/hist/day/data/')
pds.update_codes_from_YH(indexs,realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
elif update_type == 'fund':
#从银河更新基金
all_codes = pds.get_all_code(hist_dir='C:/中国银河证券海王星/T0002/export/')
funds =[]
for code in all_codes:
if code.startswi | th('1') or code.startswith('5'):
funds.append(code)
pds.update_codes_from_YH(funds,realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=True)
elif update_type == 'position':
#更新仓位
#stock_sql.update_sql_position(users={'36005':{'broker':'yh','json':'yh.json'},'38736':{'broker':'yh','json':'yh1.json'}})
stock_sql.update_sql_position(users={'account':'36005','broker':'yh','json':'yh.json'}) |
stock_sql.update_sql_position(users={'account':'38736','broker':'yh','json':'yh1.json'})
hold_df,hold_stocks,available_sells = stock_sql.get_hold_stocks(accounts = ['36005', '38736'])
print('hold_stocks=',hold_stocks)
print(hold_df)
elif update_type == 'stock':
#从新浪 qq网页更新股票
#easyhistory.init(path="C:/hist",stock_codes=hold_stocks)
#easyhistory.update(path="C:/hist",stock_codes=hold_stocks)
#easyhistory.init(path="C:/hist")#,stock_codes=all_codes)
easyhistory.update(path="C:/hist",stock_codes=all_stocks)#+b_stock)
elif update_type == 'YH' or update_type == 'yh':
all_codes = pds.get_all_code(hist_dir='C:/中国银河证券海王星/T0002/export/')
#all_codes = ['999999', '000016', '399007', '399008', '399006', '000300', '399005', '399001',
# '399004','399106','000009','000010','000903','000905']
#all_codes=['300162']
"""
code_list_dict = seprate_list(all_codes,4)
print('code_list_dict=',code_list_dict)
print('Parent process %s.' % os.getpid())
#update_yh_hist_data(codes_list=[],process_id=0)
p = Pool()
for i in range(4):
p.apply_async(update_yh_hist_data, args=(code_list_dict[i],i))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
"""
all_count = len(all_codes)
|
Pr0Ger/SGSB | plugins/Cogs.py | Python | mit | 526 | 0.001901 | import os
from lib.base_plugin import BasePlugin
from lib.paths import SteamCloudPath, SteamGamesPath
class CogsPlugin(BasePlugin):
Name = "Cogs"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Data', os.path.join(SteamCloudPath, '26500'), 'remote')
def restore(self, _):
| _.restore_folder('Data', os.path.join( | SteamCloudPath, '26500'), 'remote')
def detect(self):
if os.path.isdir(os.path.join(SteamGamesPath, 'cogs')):
return True
return False
|
pennersr/django-allauth | allauth/account/models.py | Python | mit | 5,643 | 0.000709 | import datetime
from django.core import signing
from django.db import models, transaction
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .. import app_settings as allauth_app_settings
from . import app_settings, signals
from .adapter import get_adapter
from .managers import EmailAddressManager, EmailConfirmationManager
from .utils import user_email
class EmailAddress(models.Model):
user = models.ForeignKey(
allauth_app_settings.USER_MODEL,
verbose_name=_("user"),
on_delete=models.CASCADE,
)
email = models.EmailField(
unique=app_settings.UNIQUE_EMAIL,
max_length=app_settings.EMAIL_MAX_LENGTH,
verbose_name=_("e-mail address"),
)
verified = models.BooleanFie | ld(verbose_name=_("verified"), default=False)
primary = models.BooleanField(verbose_n | ame=_("primary"), default=False)
objects = EmailAddressManager()
class Meta:
verbose_name = _("email address")
verbose_name_plural = _("email addresses")
if not app_settings.UNIQUE_EMAIL:
unique_together = [("user", "email")]
def __str__(self):
return self.email
def set_as_primary(self, conditional=False):
old_primary = EmailAddress.objects.get_primary(self.user)
if old_primary:
if conditional:
return False
old_primary.primary = False
old_primary.save()
self.primary = True
self.save()
user_email(self.user, self.email)
self.user.save()
return True
def send_confirmation(self, request=None, signup=False):
if app_settings.EMAIL_CONFIRMATION_HMAC:
confirmation = EmailConfirmationHMAC(self)
else:
confirmation = EmailConfirmation.create(self)
confirmation.send(request, signup=signup)
return confirmation
def change(self, request, new_email, confirm=True):
"""
Given a new email address, change self and re-confirm.
"""
with transaction.atomic():
user_email(self.user, new_email)
self.user.save()
self.email = new_email
self.verified = False
self.save()
if confirm:
self.send_confirmation(request)
class EmailConfirmation(models.Model):
email_address = models.ForeignKey(
EmailAddress,
verbose_name=_("e-mail address"),
on_delete=models.CASCADE,
)
created = models.DateTimeField(verbose_name=_("created"), default=timezone.now)
sent = models.DateTimeField(verbose_name=_("sent"), null=True)
key = models.CharField(verbose_name=_("key"), max_length=64, unique=True)
objects = EmailConfirmationManager()
class Meta:
verbose_name = _("email confirmation")
verbose_name_plural = _("email confirmations")
def __str__(self):
return "confirmation for %s" % self.email_address
@classmethod
def create(cls, email_address):
key = get_adapter().generate_emailconfirmation_key(email_address.email)
return cls._default_manager.create(email_address=email_address, key=key)
def key_expired(self):
expiration_date = self.sent + datetime.timedelta(
days=app_settings.EMAIL_CONFIRMATION_EXPIRE_DAYS
)
return expiration_date <= timezone.now()
key_expired.boolean = True
def confirm(self, request):
if not self.key_expired() and not self.email_address.verified:
email_address = self.email_address
get_adapter(request).confirm_email(request, email_address)
signals.email_confirmed.send(
sender=self.__class__,
request=request,
email_address=email_address,
)
return email_address
def send(self, request=None, signup=False):
get_adapter(request).send_confirmation_mail(request, self, signup)
self.sent = timezone.now()
self.save()
signals.email_confirmation_sent.send(
sender=self.__class__,
request=request,
confirmation=self,
signup=signup,
)
class EmailConfirmationHMAC:
def __init__(self, email_address):
self.email_address = email_address
@property
def key(self):
return signing.dumps(obj=self.email_address.pk, salt=app_settings.SALT)
@classmethod
def from_key(cls, key):
try:
max_age = 60 * 60 * 24 * app_settings.EMAIL_CONFIRMATION_EXPIRE_DAYS
pk = signing.loads(key, max_age=max_age, salt=app_settings.SALT)
ret = EmailConfirmationHMAC(EmailAddress.objects.get(pk=pk, verified=False))
except (
signing.SignatureExpired,
signing.BadSignature,
EmailAddress.DoesNotExist,
):
ret = None
return ret
def confirm(self, request):
if not self.email_address.verified:
email_address = self.email_address
get_adapter(request).confirm_email(request, email_address)
signals.email_confirmed.send(
sender=self.__class__,
request=request,
email_address=email_address,
)
return email_address
def send(self, request=None, signup=False):
get_adapter(request).send_confirmation_mail(request, self, signup)
signals.email_confirmation_sent.send(
sender=self.__class__,
request=request,
confirmation=self,
signup=signup,
)
|
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/DataHandlers/Mcl_Cmd_NetBios_DataHandler.py | Python | unlicense | 13,809 | 0.001521 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_NetBios_DataHandler.py
def DataHandlerMain(namespace, InputFilename, OutputFilename):
import mcl.imports
import mcl.data.Input
import mcl.data.Output
import mcl.msgtype
import mcl.status
import mcl.target
import mcl.object.Message
mcl.imports.ImportNamesWithNamespace(namespace, 'mca.network.cmd.netbios', globals())
input = mcl.data.Input.GetInput(InputFilename)
output = mcl.data.Output.StartOutput(OutputFilename, input)
output.Start('NetBios', 'netbios', [])
msg = mcl.object.Message.DemarshalMessage(input.GetData())
if input.GetStatus() != mcl.status.MCL_SUCCESS:
errorMsg = msg.FindMessage(mcl.object.Message.MSG_KEY_RESULT_ERROR)
moduleError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_MODULE)
osError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_OS)
output.RecordModuleError(moduleError, osError, errorStrings)
output.EndWithStatus(input.GetStatus())
return True
gotAny = False
while msg.GetNumRetrieved() < msg.GetCount():
if mcl.CheckForStop():
output.EndWithStatus(mcl.target.CALL_FAILED)
return False
from mcl.object.XmlOutput import XmlOutput
xml = XmlOutput()
xml.Start('NetBios')
submsg = msg.FindMessage(MSG_KEY_RESULT_NETBIOS_ADAPTER)
statusResults = ResultStatus()
statusResults.Demarshal(submsg)
if statusResults.status == 0:
gotAny = True
_handleNCB(xml, output, submsg)
_handleAdapter(xml, output, submsg)
output.RecordXml(xml)
else:
output.RecordModuleError(statusResults.errType, 0, errorStrings)
output.RecordError('Netbios Error (0x%x): %s' % (statusResults.rtnCode, _getNetbiosError(statusResults.rtnCode)))
if gotAny:
output.EndWithStatus(mcl.target.CALL_SUCCEEDED)
else:
output.EndWithStatus(mcl.target.CALL_FAILED)
return True
def _handleNCB(xml, output, msg):
results = ResultNCB()
results.Demarshal(msg)
sub = xml.AddSubElement('NCB')
sub.AddAttribute('ncb_command', '%i' % results.ncb_command)
sub.AddAttribute('ncb_retcode', '%i' % results.ncb_retcode)
sub.AddAttribute('ncb_lsn', '%i' % results.ncb_lsn)
sub.AddAttribute('ncb_num', '%i' % results.ncb_num)
sub.AddAttribute('ncb_rto', '%i' % results.ncb_rto)
sub.AddAttribute('ncb_sto', '%i' % results.ncb_sto)
sub.AddAttribute('ncb_lana_num', '%i' % results.ncb_lana_num)
sub.AddAttribute('ncb_cmd_cplt', '%i' % results.ncb_cmd_cplt)
subsub = sub.AddSubElement('CallName')
subsub.SetText(results.ncb_callname)
subsub = sub.AddSubElement('NCBName')
subsub.SetText(results.ncb_name)
def _handleAdapter(xml, output, msg):
results = ResultAdapter()
results.Demarshal(msg)
sub = xml.AddSubElement('Adapter')
sub.AddAttribute('adapter_addr', '%02x.%02x.%02x.%02x.%02x.%02x' % (results.adapter_address[0],
results.adapter_address[1],
results.adapter_address[2],
results.adapter_address[3],
results.adapter_address[4],
results.adapter_address[5]))
sub.AddAttribute('adapter_type', '0x%x' % results.adapter_type)
sub.AddAttribute('release', '%d.%d' % (results.rev_major, results.rev_minor))
sub.AddAttribute('duration', '%u' % results.duration)
sub.AddAttribute('name_count', '%u' % results.name_count)
sub.AddAttribute('frame_recv', '%u' % results.frmr_recv)
sub.AddAttribute('frame_xmit', '%u' % results.frmr_xmit)
sub.AddAttribute('iframe_recv_err', '%u' % results.iframe_recv_err)
sub.AddAttribute('xmit_aborts', '%u' % results.xmit_aborts)
sub.AddAttribute('xmit_success', '%u' % results.xmit_success)
sub.AddAttribute('recv_success', '%u' % results.recv_success)
sub.AddAttribute('iframe_xmit_err', '%u' % results.iframe_xmit_err)
sub.AddAttribute('recv_buff_unavail', '%u' % results.recv_buff_unavail)
sub.AddAttribute('t1_timeouts', '%u' % results.t1_timeouts)
sub.AddAttribute('ti_timeouts', '%u' % results.ti_timeouts)
sub.AddAttribute('free_ncbs', '%u' % results.free_ncbs)
sub.AddAttribute('max_dgram_size', '%u' % results.max_dgram_size)
sub.AddAttribute('max_sess_pkt_size', '%u' % results.max_sess_pkt_size)
sub.AddAttribute('pending_sess', '%u' % results.pending_sess)
sub.AddAttribute('max_cfg_sess', '%u' % results.max_cfg_sess)
sub.AddAttribute('max_cfg_ncbs', '%u' % results.max_cfg_ncbs)
sub.AddAttribute('max_ncbs', '%u' % results.max_ncbs)
sub.AddAttribute('xmit_buf_unavail', '%u' % results.xmit_buf_unavail)
sub.AddAttribute('max_sess', '%u' % results.max_sess)
i = 0
while i < results.name_count:
nameResults = ResultName()
nameResults.Demarshal(msg)
sub2 = sub.AddSubElement('Names')
sub3 = sub2.AddSubElement('Type')
sub3.SetText(_getNetbiosNameType(nameResults.type, nameResults.nameFlags, nameResults.networkName))
sub3 = sub2.AddSubElement('NetName')
sub3.SetText(_getNetName(nameResults.nameFlags))
sub3 = sub2.AddSubElement('Name')
sub3.SetText(nameResults.networkName)
i = i + 1
def _getNetbiosError(error):
if error == RESULT_NRC_GOODRET:
return 'The operation succeeded'
else:
if error == RESULT_NRC_BUFLEN:
return 'An illegal buffer length was supplied'
if error == RESULT_NRC_ILLCMD:
return 'An illegal command was supplied'
if error == RESULT_NRC_CMDTMO:
return 'The command timed out'
if error == RESULT_NRC_INCOMP:
return 'The message was incomplete. The application is to issue another command'
if error == RESULT_NRC_BADDR:
return 'The buffer address was illegal'
if error == RESULT_NRC_SNUMOUT:
return 'The session number was out of range'
if error == RESULT_NRC_NORES:
return 'No resource was available'
if error == RESULT_NRC_ | SCLOSED:
return 'The session was closed'
if error == RESULT_NRC_CMDCAN:
return 'The command was canceled'
if error == RESULT_NRC_DUPNAME:
return 'A duplicate name existed in the local name table'
if error == RESULT_NRC_NAMTFUL:
return 'The name ta | ble was full'
if error == RESULT_NRC_ACTSES:
return 'The command finished; the name has active sessions and is no longer registered'
if error == RESULT_NRC_LOCTFUL:
return 'The local session table was full'
if error == RESULT_NRC_REMTFUL:
return 'The remote session table was full. The request to open a session was rejected'
if error == RESULT_NRC_ILLNN:
return 'An illegal name number was specified'
if error == RESULT_NRC_NOCALL:
return 'The system did not find the name that was called'
if error == RESULT_NRC_NOWILD:
return 'Wildcards are not permitted in the ncb_name member'
if error == RESULT_NRC_INUSE:
return 'The name was already in use on the remote adapter'
if error == RESULT_NRC_NAMERR:
return 'The name was deleted'
if error == RESULT_NRC_SABORT:
return 'The session ended abnormally'
if error == RESULT_NRC_NAMCONF:
return 'A name conflict was detected'
if error == RESULT_NRC_IFBUSY:
return 'The interface was busy'
if error == RESULT_NRC_TOOMANY:
return 'Too many commands were outstanding; the application can retry the command later'
if error == RESULT_NRC_BRIDGE:
return 'The ncb_lana_num member did not specify a valid network number'
if error == RESULT_NRC_CANOCCR:
return 'The command finished while a cancel operation was occurring'
if error == RESULT_NRC_CANCEL:
return 'The NCBCANCEL command was not valid; the command |
pigmej/uwsgi_no_pp | plugins/pypy/pypy_setup.py | Python | gpl-2.0 | 29,660 | 0.002293 | import sys
import os
sys.path.insert(0, '.')
sys.path.extend(os.environ.get('PYTHONPATH', '').split(os.pathsep))
import imp
import traceback
__name__ = '__main__'
mainmodule = type(sys)('__main__')
sys.modules['__main__'] = mainmodule
import cffi
# this is a list holding object we do not want to be freed (like callback and handlers)
uwsgi_gc = []
# the main ffi
ffi = cffi.FFI()
# the hooks we need to patch
hooks = '''
void free(void *);
ssize_t read(int, void *, size_t);
ssize_t write(int, const void *, size_t);
int close(int);
void (*uwsgi_pypy_hook_execute_source)(char *);
void (*uwsgi_pypy_hook_loader)(char *);
void (*uwsgi_pypy_hook_file_loader)(char *);
void (*uwsgi_pypy_hook_paste_loader)(char *);
void (*uwsgi_pypy_hook_pythonpath)(char *);
void (*uwsgi_pypy | _hook_request)(struct wsgi_request *);
void (*uwsgi_pypy_post_fork_hook)(void);
'''
# here we load CFLAGS and uwsgi.h from the binary
defines0 = '''
char *uwsgi_get_cflags();
char *uwsgi_get_dot_h();
'''
ffi.cdef(defines0)
lib0 = ffi.verify(defines0)
# this is ugly, we should find a better approach
# basically it build a list of #define from binary CFLAGS
uwsgi_cdef = []
uwsgi_defines = []
uwsgi_cflags = ffi.string(lib0.uwsgi_get_cflags()).split()
for cflag in u | wsgi_cflags:
if cflag.startswith('-D'):
line = cflag[2:]
if '=' in line:
(key, value) = line.split('=', 1)
uwsgi_cdef.append('#define %s ...' % key)
uwsgi_defines.append('#define %s %s' % (key, value.replace('\\"', '"').replace('""', '"')))
else:
uwsgi_cdef.append('#define %s ...' % line)
uwsgi_defines.append('#define %s 1' % line)
uwsgi_dot_h = ffi.string(lib0.uwsgi_get_dot_h())
# uwsgi definitions
cdefines = '''
%s
struct iovec {
void *iov_base;
size_t iov_len;
...;
};
struct uwsgi_header {
uint8_t modifier1;
...;
};
struct wsgi_request {
int fd;
int async_id;
uint16_t var_cnt;
struct iovec *hvec;
int async_ready_fd;
int async_last_ready_fd;
int suspended;
struct uwsgi_header *uh;
...;
};
struct uwsgi_opt {
char *key;
char *value;
...;
};
struct uwsgi_worker {
int id;
int pid;
uint64_t requests;
uint64_t delta_requests;
uint64_t signals;
int cheaped;
int suspended;
int sig;
uint8_t signum;
uint64_t running_time;
uint64_t avg_response_time;
uint64_t tx;
...;
};
struct uwsgi_plugin {
uint8_t modifier1;
void (*suspend) (struct wsgi_request *);
void (*resume) (struct wsgi_request *);
...;
};
struct uwsgi_buffer {
char *buf;
size_t pos;
...;
};
struct uwsgi_lock_item {
...;
};
struct uwsgi_cache {
struct uwsgi_lock_item *lock;
...;
};
struct uwsgi_cache_item {
uint64_t keysize;
...;
};
struct uwsgi_server {
char hostname[];
int mywid;
int muleid;
int master_process;
struct uwsgi_opt **exported_opts;
int exported_opts_cnt;
struct uwsgi_worker *workers;
int signal_socket;
int numproc;
int async;
void (*schedule_to_main) (struct wsgi_request *);
void (*schedule_to_req) (void);
struct wsgi_request *(*current_wsgi_req) (void);
struct wsgi_request *wsgi_req;
struct uwsgi_plugin *p[];
...;
};
struct uwsgi_server uwsgi;
struct uwsgi_plugin pypy_plugin;
const char *uwsgi_pypy_version;
char *uwsgi_binary_path();
void *uwsgi_malloc(size_t);
int uwsgi_response_prepare_headers(struct wsgi_request *, char *, size_t);
int uwsgi_response_add_header(struct wsgi_request *, char *, uint16_t, char *, uint16_t);
int uwsgi_response_write_body_do(struct wsgi_request *, char *, size_t);
int uwsgi_response_sendfile_do_can_close(struct wsgi_request *, int, size_t, size_t, int);
char *uwsgi_request_body_read(struct wsgi_request *, ssize_t , ssize_t *);
char *uwsgi_request_body_readline(struct wsgi_request *, ssize_t, ssize_t *);
void uwsgi_buffer_destroy(struct uwsgi_buffer *);
int uwsgi_is_again();
int uwsgi_register_rpc(char *, struct uwsgi_plugin *, uint8_t, void *);
int uwsgi_register_signal(uint8_t, char *, void *, uint8_t);
char *uwsgi_do_rpc(char *, char *, uint8_t, char **, uint16_t *, uint64_t *);
void uwsgi_set_processname(char *);
int uwsgi_signal_send(int, uint8_t);
uint64_t uwsgi_worker_exceptions(int);
int uwsgi_worker_is_busy(int);
char *uwsgi_cache_magic_get(char *, uint16_t, uint64_t *, uint64_t *, char *);
int uwsgi_cache_magic_set(char *, uint16_t, char *, uint64_t, uint64_t, uint64_t, char *);
int uwsgi_cache_magic_del(char *, uint16_t, char *);
int uwsgi_cache_magic_exists(char *, uint16_t, char *);
int uwsgi_cache_magic_clear(char *);
struct uwsgi_cache *uwsgi_cache_by_name(char *);
void uwsgi_cache_rlock(struct uwsgi_cache *);
void uwsgi_cache_rwunlock(struct uwsgi_cache *);
char *uwsgi_cache_item_key(struct uwsgi_cache_item *);
struct uwsgi_cache_item *uwsgi_cache_keys(struct uwsgi_cache *, uint64_t *, struct uwsgi_cache_item **);
int uwsgi_add_file_monitor(uint8_t, char *);
int uwsgi_add_timer(uint8_t, int);
int uwsgi_signal_add_rb_timer(uint8_t, int, int);
int uwsgi_user_lock(int);
int uwsgi_user_unlock(int);
int uwsgi_signal_registered(uint8_t);
int uwsgi_signal_add_cron(uint8_t, int, int, int, int, int);
void uwsgi_alarm_trigger(char *, char *, size_t);
void async_schedule_to_req_green(void);
void async_add_timeout(struct wsgi_request *, int);
int async_add_fd_write(struct wsgi_request *, int, int);
int async_add_fd_read(struct wsgi_request *, int, int);
int uwsgi_connect(char *, int, int);
int uwsgi_websocket_handshake(struct wsgi_request *, char *, uint16_t, char *, uint16_t, char *, uint16_t);
int uwsgi_websocket_send(struct wsgi_request *, char *, size_t);
struct uwsgi_buffer *uwsgi_websocket_recv(struct wsgi_request *);
struct uwsgi_buffer *uwsgi_websocket_recv_nb(struct wsgi_request *);
char *uwsgi_chunked_read(struct wsgi_request *, size_t *, int, int);
void uwsgi_disconnect(struct wsgi_request *);
int uwsgi_ready_fd(struct wsgi_request *);
void set_user_harakiri(struct wsgi_request *, int);
int uwsgi_metric_set(char *, char *, int64_t);
int uwsgi_metric_inc(char *, char *, int64_t);
int uwsgi_metric_dec(char *, char *, int64_t);
int uwsgi_metric_mul(char *, char *, int64_t);
int uwsgi_metric_div(char *, char *, int64_t);
int64_t uwsgi_metric_get(char *, char *);
%s
''' % ('\n'.join(uwsgi_cdef), hooks)
cverify = '''
%s
const char *uwsgi_pypy_version = UWSGI_VERSION;
%s
extern struct uwsgi_server uwsgi;
extern struct uwsgi_plugin pypy_plugin;
%s
''' % ('\n'.join(uwsgi_defines), uwsgi_dot_h, hooks)
ffi.cdef(cdefines)
lib = ffi.verify(cverify)
libc = ffi.dlopen(None)
"""
this is a global object point the the WSGI callable
it sucks, i will fix it in the near future...
"""
wsgi_application = None
# fix argv if needed
if len(sys.argv) == 0:
sys.argv.insert(0, ffi.string(lib.uwsgi_binary_path()))
@ffi.callback("void(char *)")
def uwsgi_pypy_execute_source(s):
"""
execute source, we expose it as cffi callback to avoid deadlocks
after GIL initialization
"""
source = ffi.string(s)
exec(source)
@ffi.callback("void(char *)")
def uwsgi_pypy_loader(module):
"""
load a wsgi module
"""
global wsgi_application
m = ffi.string(module)
c = 'application'
if ':' in m:
m, c = m.split(':')
if '.' in m:
mod = __import__(m, None, None, '*')
else:
mod = __import__(m)
wsgi_application = getattr(mod, c)
@ffi.callback("void(char *)")
def uwsgi_pypy_file_loader(filename):
"""
load a mod_wsgi compliant .wsgi file
"""
global wsgi_application
w = ffi.string(filename)
c = 'application'
mod = imp.load_source('uwsgi_file_wsgi', w)
wsgi_application = getattr(mod, c)
@ffi.callback("void(char *)")
def uwsgi_pypy_paste_loader(config):
"""
load a .ini paste app
"""
global wsgi_application
c |
daicang/Euler | p47.py | Python | mit | 702 | 0.002849 | # Problem 47
# First number of first 4 consecutive numbers to have 4 distictive prime factors each
import ite | rtools
from prime import prime
class Solve(object):
def __init__(self):
pass
def solve(self):
def satisfy(x):
"""
Test if x has four distinctive prime factors
"""
return len(prime.prime_factor(x)) == 4
prev_satisfy_counter = 0
for i in ite | rtools.count(start=600):
if satisfy(i):
prev_satisfy_counter += 1
if prev_satisfy_counter == 4:
return i - 3
else:
prev_satisfy_counter = 0
s = Solve()
print s.solve() |
ssssam/nightbus | nightbus/tasks.py | Python | apache-2.0 | 12,497 | 0.00056 | # Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Night Bus: Simple SSH-based build automation'''
import gevent
import yaml
import collections
import itertools
import logging
import os
import time
import nightbus
from nightbus.utils import ensure_list
DEFAULT_SHELL = '/bin/bash -c'
class Task():
'''A single task that we can run on one or more hosts.'''
def __init__(self, attrs, name=None, defaults=None, parameters=None):
defaults = defaults or {}
self.name = name or attrs['name']
includes = ensure_list(defaults.get('include')) + \
ensure_list(attrs.get('include'))
self.script = self._script(
attrs['commands'], prologue=defaults.get('prologue'),
includes=includes, parameters=parameters)
# This gets passed straight to ParallelSSHClient.run_command()
# so it's no problem for its value to be `None`.
self.shell = attrs.get('shell', defaults.get('shell', DEFAULT_SHELL))
def _script(self, commands, prologue=None, includes=None, parameters=None):
'''Generate the script that executes this task.'''
parts = []
if parameters:
for name, value in parameters.items():
parts.append('%s=%s' % (name, value))
if prologue:
parts.append(prologue)
for include in includes:
with open(include) as f:
parts.append(f.read())
parts.append(commands)
return '\n'.join(parts)
class TaskList(list):
'''Contains a user-specified list of descriptions of tasks to run.'''
def __init__(self, text):
contents = yaml.safe_load(text)
if isinstance(contents, list):
defaults = None
entry_list = contents
elif isinstance(contents, dict):
defaults = contents.get('defaults', {})
entry_list = contents['tasks']
else:
raise RuntimeError("Tasks file is invalid.")
for entry in entry_list:
self.extend(self._create_tasks(entry, defaults=defaults))
def _create_tasks(self, entry, defaults=None):
'''Create one or more task objects for a given task list entry.
There can be more than one Task object for an entry due to the
'parameters' option.
'''
if 'parameters' in entry:
tasks = []
parameters = entry['parameters']
# Create an iterable for each parameter containing (name, value)
# pairs, e.g. (('param', 1), ('param', 2), ('param', 3)).
iterables = []
for param_name in sorted(parameters.keys()):
param_values = parameters[param_name]
param_pairs = list(itertools.product([param_name], param_values))
iterables.append(param_pairs)
# From that create a list of every combination of parameter values.
if len(iterables) > 1:
combos = list(itertools.product(*iterables))
else:
combos = [[i] for i in iterables[0]]
# The value of a parameter can be given literally, or given as a
# dict with 'repr' and 'value' keys. The value used in the task may
# not be useful when used in the name of the task, it might be an
# empty string or contain unprintable characters, so you can set
# the `repr` in these cases to something else.
def param_repr(value_entry):
if isinstance(value_entry, dict):
return str(value_entry.get('repr', value_entry['value']))
else:
return str(value_entry)
def param_value(value_entry):
if isinstance(value_entry, dict):
return str(value_entry['value'])
else:
return str(value_entry)
# Finally generate the Task object for each parameter combination.
task_base_name = entry['name']
for combo in combos:
this_parameters = {pair[0]: param_ | value(pair[1]) for pair in combo}
this_parameter_reprs = [param_repr(pair[1]) for pair in combo]
this_name = '.'.join([task_base_name] + this_parameter_reprs)
tasks.append(Task(entry, name=this_name, defaults=defaults,
| parameters=this_parameters))
else:
tasks = [Task(entry, defaults=defaults)]
return tasks
def names(self):
return [task.name for task in self]
class TaskResult():
'''Results of executing a one task on one host.'''
def __init__(self, name, host, duration=None, exit_code=None, message_list=None):
self.name = name
self.host = host
self.duration = duration
self.exit_code = exit_code
self.message_list = message_list
def run_task(client, hosts, task, log_directory, run_name=None, force=False):
'''Run a single task on all the specified hosts.'''
name = task.name
run_name = run_name or name
logging.info("%s: Starting task run", run_name)
start_time = time.time()
# Run the commands asynchronously on all hosts.
cmd = 'task_name=%s\n' % name
if force:
cmd += 'force=yes\n'
cmd += task.script
shell = task.shell
output = client.run_command(cmd, shell=shell, stop_on_errors=True)
# ParallelSSH doesn't give us a way to run a callback when the host
# produces output or the command completes. In order to stream the
# output into separate log files, we run a Greenlet to monitor each
# host.
def watch_output(output, host):
log_filename = safe_filename(run_name + '.' + host + '.log')
log = os.path.join(log_directory, log_filename)
messages = []
with open(log, 'wb') as f:
for line in output[host].stdout:
f.write(line.encode('unicode-escape'))
f.write(b'\n')
if line.startswith('##nightbus '):
messages.append(line[len('##nightbus '):])
duration = time.time() - start_time
exit_code = output[host].exit_code
return nightbus.tasks.TaskResult(
run_name, host, duration=duration, exit_code=exit_code, message_list=messages)
watchers = [gevent.spawn(watch_output, output, host) for host in hosts]
gevent.joinall(watchers, raise_error=True)
logging.info("%s: Started all jobs, waiting for them to finish", run_name)
client.join(output)
logging.info("%s: All jobs finished", run_name)
results = collections.OrderedDict()
for result in sorted((watcher.value for watcher in watchers),
key=lambda result: result.host):
results[result.host] = result
return results
def safe_filename(filename):
# If you want to escape more characters, switch to using re.sub()
return filename.replace('/', '_')
def run_all_tasks(client, hosts, tasks, log_directory, force=False):
'''Loop through each task sequentially.
We only want to run one task on a host at a time, as we assume it'll
maximize at least one of available CPU, RAM and IO. However, if fast hosts
could move onto the next task before slow hosts have finished with the
previous one it might be nice.
'''
all_results = collections.OrderedDict()
number = 1
working_hosts = list(hosts)
for task in tasks:
name = '%i.%s' % (number, task.name)
try:
result_dict = run_task(
client, working_hosts, task, log_dir |
yosshy/nova | nova/api/openstack/compute/server_usage.py | Python | apache-2.0 | 2,859 | 0 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
ALIAS = "os-server-usage"
authorize = exte | nsions.os_compute_soft_authorizer(ALIAS)
resp_topic = "OS-SRV-USG"
class ServerUsageController(wsgi.Controller):
def __init__(self, *args, **kwargs):
| super(ServerUsageController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_server(self, server, instance):
for k in ['launched_at', 'terminated_at']:
key = "%s:%s" % (resp_topic, k)
# NOTE(danms): Historically, this timestamp has been generated
# merely by grabbing str(datetime) of a TZ-naive object. The
# only way we can keep that with instance objects is to strip
# the tzinfo from the stamp and str() it.
server[key] = (instance[k].replace(tzinfo=None)
if instance[k] else None)
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(server, db_instance)
class ServerUsage(extensions.V21APIExtensionBase):
"""Adds launched_at and terminated_at on Servers."""
name = "ServerUsage"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ServerUsageController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
|
DavidMcDonald1993/ghsom | parameter_tests.py | Python | gpl-2.0 | 6,711 | 0.013709 |
# coding: utf-8
# In[12]:
import os
from shutil import copyfile
import subprocess
from save_embedded_graph27 import main_binary as embed_main
from spearmint_ghsom import main as ghsom_main
import numpy as np
import pickle
from time import time
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
#root dir
os.chdir("C:\Miniconda3\Jupyter\GHSOM_simplex_dsd")
#save directory
dir = os.path.abspath("parameter_tests")
#number of times to repeat
num_repeats = 10
#number of nodes in communitiy
s1 = 32
#number of links to same community
z1 = 16
#number of nodes in micro community
minc = s1
maxc = s1
#make save directory
if not os.path.isdir(dir):
os.mkdir(dir)
#change to dir
os.chdir(dir)
#network file names -- output of network generator
network = "network.dat"
first_level = "community.dat"
#community labels
labels = 'firstlevelcommunity'
#mixing parameter
z2 = 16
#node degree
k = z1 + z2
maxk = k
#mixing factors
mu = float(z2) / k
num_communities = [3, 4, 5, 6]
parameter_settings = [0.5, 0.6, 0.7, 0.8, 0.9, 1]
overall_nmi_scores = np.zeros((len(num_communities), len(parameter_settings), num_repeats))
for i in range(len(num_communities)):
# for k1 in num_communities:
k1 = num_communities[i]
#number of nodes in the network
N = k1 * s1
#create directory
dir_string = os.path.join(dir, str(k1))
if not os.path.isdir(dir_string):
os.mkdir(dir_string)
#change working directory
os.chdir(dir_string)
for j in range(len(parameter_settings)):
# for p in parameter_settings:
p = parameter_settings[j]
#ghsom parameters
params = {'w': 0.0001,
'eta': 0.0001,
'sigma': 1,
'e_sg': p,
'e_en': 0.8}
#create directory
dir_string_p = os.path.join(dir_string, str(p))
if not os.path.isdir(dir_string_p):
os.mkdir(dir_string_p)
#change working directory
os.chdir(dir_string_p)
if os.path.isfile('nmi_scores.csv'):
print 'already completed {}/{}, loading scores and continuing'.format(k1, p)
nmi_scores = np.genfromtxt('nmi_scores.csv', delimiter=',')
print nmi_xcores
overall_nmi_scores[i,j,:] = nmi_scores
continue
#copy executable
ex = "benchmark.exe"
if not os.path.isfile(ex):
source = "C:\\Users\\davem\\Documents\\PhD\\Benchmark Graph Generators\\binary_networks\\benchmark.exe"
copyfile(source, ex)
#make benchmark parameter file
filename = "benchmark_flags_{}_{}.dat".format(k1,p)
if not os.path.isfile(filename):
with open(filename,"w") as f:
f.write("-N {} -k {} -maxk {} -minc {} -maxc {} -mu {}".format(N, k, maxk, minc, maxc, mu))
print 'written flag file: {}'.format(filename)
#cmd strings
change_dir_cmd = "cd {}".format(dir_string_p)
generate_network_cmd = "benchmark -f {}".format(filename)
#output of cmd
output_file = open("cmd_output.out", 'w')
#record NMI scores
if not os.path.isfile('nmi_scores.pkl'):
print 'creating new nmi scores array'
nmi_scores = np.zeros(num_repeats)
else:
print 'loading nmi score progress'
nmi_scores = load_obj('nmi_scores')
#record running times
if not os.path.isfile('running_times.pkl'):
print 'creating new running time array'
running_times = np.zeros(num_repeats)
else:
print 'loading running time progress'
running_times = load_obj('running_times')
print
#generate networks
for r in range(1, num_repeats+1):
network_rename = "{}_{}".format(r,network)
first_level_rename = "{}_{}".format(r,first_level)
gml_filename = 'embedded_network_{}.gml'.format(r)
if not os.path.isfile(network_rename):
process = subprocess.Popen(change_dir_cmd + " && " + generate_network_cmd,
stdout=output_file,
stderr=output_file,
shell=True)
process.wait()
print 'generated graph {}'.format(r)
os.rename(network, network_rename)
os.rename(first_level, first_level_rename)
print 'renamed graph {}'.format(r)
if not os.path.isfile(gml_filename):
##embed graph
embed_main(network_rename, first_level_rename)
print 'embedded graph {} as {} in {}'.format(r, gml_filename, os.getcwd())
##score for this network
if not np.all(nmi_scores[r-1]):
start_time = time()
print 'starting ghsom for: {}/{}/{}'.format(k1, p, gml_filename)
nmi_score, communities_detected = ghsom_main(params, gml_filename, labels)
nmi_scores[r-1] = nmi_score
running_time = time() - start_time
print 'running time of algor | ithm: {}'.format(running_time)
running_times[r-1] = running_time
#save
save_obj(nmi_scores, 'nmi_scores')
save_obj(running_times, 'running_times')
print 'saved nmi score for network {}: {}'.format(gml_filename, nmi_score)
print
##output nmi scores to csv file
print 'writing nmi scores and ru | nning times to file'
np.savetxt('nmi_scores.csv',nmi_scores,delimiter=',')
np.savetxt('running_times.csv',running_times,delimiter=',')
print
print 'DONE'
print 'OVERALL NMI SCORES'
print overall_nmi_scores
# In[9]:
for i in range(len(num_communities)):
for j in range(len(parameter_settings)):
scores = overall_nmi_scores[i,j]
# print scores
# idx = np.argsort(scores)[::-1]
# print parameter_settings[idx[0]]
print np.mean(scores)
print np.std(scores) / num_repeats
print
# In[ ]:
|
andree1320z/deport-upao-web | deport_upao/core/forms.py | Python | mit | 224 | 0 | from django import forms
class ContactForm(forms.Form):
n | ame = forms.CharField(label='Nombre', max_length=100)
file = forms.ImageField(label='Imagen | ')
message = forms.CharField(label='Mensaje', max_length=100)
|
ThomasSweijen/yadesolute2 | py/utils.py | Python | gpl-2.0 | 47,303 | 0.043089 | # encoding: utf-8
#
# utility functions for yade
#
# 2008-2009 © Václav Šmilauer <eudoxos@arcig.cz>
"""Heap of functions that don't (yet) fit anywhere else.
Devs: please DO NOT ADD more functions here, it is getting too crowded!
"""
import math,random,doctest,geom,numpy
from yade import *
from yade.wrapper import *
try: # use psyco if available
import psyco
psyco.full()
except ImportError: pass
try:
from minieigen import *
except ImportError:
from miniEigen import *
# c++ implementations for performance reasons
from yade._utils import *
def saveVars(mark='',loadNow=True,**kw):
"""Save passed variables into the simulation so that it can be recovered when the simulation is loaded again.
For example, variables *a*, *b* and *c* are defined. To save them, use::
>>> saveVars('something',a=1,b=2,c=3)
>>> from yade.params.something import *
>>> a,b,c
(1, 2, 3)
those variables will be save in the .xml file, when the simulation itself is saved. To recover those variables once the .xml is loaded again, use
>>> loadVars('something')
and they will be defined in the yade.params.\ *mark* module. The *loadNow* parameter calls :yref:`yade.utils.loadVars` after saving automatically.
If 'something' already exists, given variables will be inserted.
"""
import cPickle
try:
d=cPickle.loads(Omega().tags['pickledPythonVariablesDictionary'+mark]) #load dictionary d
for key in kw.keys():
d[key]=kw[key] #insert new variables into d
except KeyError:
d = kw
Omega().tags['pickledPythonVariablesDictionary'+mark]=cPickle.dumps(d)
if loadNow: loadVars(mark)
def loadVars(mark=None):
"""Load variables from :yref:`yade.utils.saveVars`, which are saved inside the simulation.
If ``mark==None``, all save variables are loaded. Otherwise only those with
the mark passed."""
import cPickle, types, sys, warnings
def loadOne(d,mark=None):
"""Load given dictionary into a synthesized module yade.params.name (or yade.params if *name* is not given). Update yade.params.__all__ as well."""
import yade.params
if mark:
if mark in yade.params.__dict__: warnings.warn('Overwriting yade.params.%s which already exists.'%mark)
modName='yade.params.'+mark
mod=types.ModuleType(modName)
mod.__dict__.update(d)
mod.__all__=list(d.keys()) # otherwise params starting with underscore would not be imported
sys.modules[modName]=mod
yade.params.__all__.append(mark)
yade.params.__dict__[mark]=mod
else:
yade.params.__all__+=list(d.keys())
yade.params.__dict__.update(d)
if mark!=None:
d=cPickle.loads(Omega().tags['pickledPythonVariablesDictionary'+mark])
loadOne(d,mark)
else: # load everything one by one
for m in Omega().tags.keys():
if m.startswith('pickledPythonVariablesDictionary'):
loadVars(m[len('pickledPythonVariableDictionary')+1:])
def SpherePWaveTimeStep(radius,density,young):
r"""Compute P-wave critical timestep for a single (presumably representative) sphere, using formula for P-Wave propagation speed $\Delta t_{c}=\frac{r}{\sqrt{E/\rho}}$.
If you want to compute minimum critical timestep for all spheres in the simulation, use :yref:`yade.utils.PWaveTimeStep` instead.
>>> SpherePWaveTimeStep(1e-3,2400,30e9)
2.8284271247461903e-07
"""
from math import sqrt
return radius/sqrt(young/density)
def randomColor():
"""Return random Vector3 with each component in interval 0…1 (uniform distribution)"""
return Vector3(random.random(),random.random(),random.random())
def typedEngine(name):
"""Return first engine from current O.engines, identified by its type (as string). For example:
>>> from yade import utils
>>> O.engines=[InsertionSortCollider(),NewtonIntegrator(),GravityEngine()]
>>> utils.typedEngine("NewtonIntegrator") == O.engines[1]
True
"""
return [e for e in Omega().engines if e.__class__.__name__==name][0]
def defaultMaterial():
"""Return default material, when creating bodies with :yref:`yade.utils.sphere` and friends, material is unspecified and there is no shared material defined yet. By default, this function returns::
.. code-block:: python
FrictMat(density=1e3,young=1e7,poisson=.3,frictionAngle=.5,label='defaultMat')
"""
return FrictMat(density=1e3,young=1e7,poisson=.3,frictionAngle=.5,label='defaultMat')
def _commonBodySetup(b, volume, geomInertia, material, pos, noBound=False,
                     resetState=True, dynamic=None, fixed=False):
    """Assign common body parameters (material, mass, inertia, position, DOFs).

    :param b: the Body being configured (mutated in place).
    :param volume: body volume, used with material density to compute mass.
    :param geomInertia: geometric inertia (Vector3); scaled by density.
    :param material: int (index into O.materials; -1 with no materials appends
        defaultMaterial()), str (material label), Material instance, or a
        callable returning a Material.
    :param pos: position assigned to both ``state.pos`` and ``state.refPos``.
    :param noBound: if True, the body is excluded from collision detection.
    :param resetState: if True, replace ``b.state`` with the material's
        associated state type.
    :param dynamic: deprecated; use ``fixed`` (note the inverted meaning).
    :param fixed: if True, block all six degrees of freedom.
    :raises TypeError: if *material* is none of the accepted types.
    """
    if isinstance(material, int):
        # Special case: material == -1 with an empty material list creates the default.
        if material < 0 and len(O.materials) == 0:
            O.materials.append(defaultMaterial())
        b.mat = O.materials[material]
    elif isinstance(material, str):
        b.mat = O.materials[material]
    elif isinstance(material, Material):
        b.mat = material
    elif callable(material):
        b.mat = material()
    else:
        raise TypeError("The 'material' argument must be None (for defaultMaterial), string (for shared material label), int (for shared material id) or Material instance.")
    ## resets state (!!)
    if resetState:
        b.state = b.mat.newAssocState()
    mass = volume * b.mat.density
    b.state.mass, b.state.inertia = mass, geomInertia * b.mat.density
    b.state.pos = b.state.refPos = pos
    b.bounded = (not noBound)
    if dynamic is not None:
        import warnings
        warnings.warn('dynamic=%s is deprecated, use fixed=%s instead' % (str(dynamic), str(not dynamic)), category=DeprecationWarning, stacklevel=2)
        fixed = not dynamic
    b.state.blockedDOFs = ('xyzXYZ' if fixed else '')
def sphere(center,radius,dynamic=None,fixed=False,w | ire=False,color=None,highlight=False,material=-1,mask=1):
"""Create sphere with given parameters; mass and inertia computed automatically.
Last assigned material is used by default (*material* = -1), and utils.defaultMaterial() will be used if no material is defined at all.
:param Vector3 center: center
:param float radius: radius
:param float dynamic: deprecated, see "fixed"
:param float fixed: generate the body with all DOFs blocked?
:param material:
specify :yref:`Body.material`; different types are accepted:
* int: O.materials[material] will be used; as a special case, if material==-1 and there is no shared materials defined, utils.defaultMaterial() will be assigned to O.materials[0]
* string: label of an existing material that will be used
* :yref:`Material` instance: this instance will be used
* callable: will be called without arguments; returned Material value will be used (Material factory object, if you like)
:param int mask: :yref:`Body.mask` for the body
:param wire: display as wire sphere?
:param highlight: highlight this body in the viewer?
:param Vector3-or-None: body's color, as normalized RGB; random color will be assigned if ``None``.
:return:
A Body instance with desired characteristics.
Creating default shared material if none exists neither is given::
>>> O.reset()
>>> from yade import utils
>>> len(O.materials)
0
>>> s0=utils.sphere([2,0,0],1)
>>> len(O.materials)
1
Instance of material can be given::
>>> s1=utils.sphere([0,0,0],1,wire=False,color=(0,1,0),material=ElastMat(young=30e9,density=2e3))
>>> s1.shape.wire
False
>>> s1.shape.color
Vector3(0,1,0)
>>> s1.mat.density
2000.0
Material can be given by label::
>>> O.materials.append(FrictMat(young=10e9,poisson=.11,label='myMaterial'))
1
>>> s2=utils.sphere([0,0,2],1,material='myMaterial')
>>> s2.mat.label
'myMaterial'
>>> s2.mat.poisson
0.11
Finally, material can be a callable object (taking no arguments), which returns a Material instance.
Use this if you don't call this function directly (for instance, through yade.pack.randomDensePack), passing
only 1 *material* parameter, but you don't want material to be shared.
For instance, randomized material properties can be created like this:
>>> import random
>>> def matFactory(): return ElastMat(young=1e10*random.random(),density=1e3+1e3*random.random())
...
>>> s3=utils.sphere([0,2,0],1,material=matFactory)
>>> s4=utils.sphere([1,2,0],1,material=matFactory)
"""
b=Body()
b.shape=Sphere(radius=radius,color=color if color else randomColor(),wire=wire,highlight=highlight)
V=(4./3)*math.pi*radius**3
geomInert=(2./5.)*V*radius**2
_commonBodySetup(b,V,Vector3(geomInert,geomInert,geomInert),material,pos=c |
loktacar/wallpapermaker | plugins/simple_resize/simple_resize.py | Python | mit | 734 | 0.004087 | import logging
import pygame
from .. import Collage
class SimpleResize(Collage):
    """
    Example class for collage plugins
        - Takes a single image and resizes it to the requested wallpaper size
    """
    # Plugin display name used by the collage framework.
    name = 'simple resize'

    def __init__(self, config):
        super(SimpleResize, self).__init__(config)

    def generate(self, size):
        """Build a pygame Surface of *size* from a single resized wallpaper."""
        wallpapers = self._get_wallpapers()
        logging.debug('Generating...')
        collage = pygame.Surface(size)
        # _resize_wallpaper returns the crop offset plus the scaled surface.
        wp_offset, wp = self._resize_wallpaper(wallpapers[0], size)
        collage.blit(wp, (0, 0), pygame.Rect(wp_offset, size))
        logging.debug('Generation complete')
        return collage

    def _get_wallpapers(self):
        # Pop a single wallpaper from the configured source.
        return self.wallpaper_source.pop()
Ultimaker/Cura | cmake/mod_bundled_packages_json.py | Python | lgpl-3.0 | 2,593 | 0.015812 | #!/usr/bin/env python3
#
# This script removes the given package entries in the bundled_packages JSON files. This is used by the PluginInstall
# CMake module.
#
import argparse
import collections
import json
import os
import sys
def find_json_files(work_dir: str) -> list:
    """Recursively collect the files under *work_dir* and return their absolute paths.

    NOTE(review): despite the name, no ``.json`` suffix filtering happens here —
    every file found is returned, and non-JSON files are skipped later when they
    fail to parse. Confirm that is intended before adding a filter.

    :param work_dir: The directory to look for JSON files recursively.
    :return: A list of files in absolute paths found in the given directory.
    """
    collected = []
    for base_dir, _sub_dirs, names in os.walk(work_dir):
        collected.extend(
            os.path.abspath(os.path.join(base_dir, name)) for name in names
        )
    return collected
def remove_entries_from_json_file(file_path: str, entries: list) -> None:
    """Removes the given entries from the given JSON file. The file will be modified in-place.

    Files that cannot be parsed as JSON are reported on stderr and skipped;
    a failure to write the result back raises IOError.

    :param file_path: The JSON file to modify.
    :param entries: A list of strings as entries to remove.
    :return: None
    :raises IOError: if the modified JSON cannot be written back.
    """
    try:
        # OrderedDict hook keeps the original key order in the rewritten file.
        with open(file_path, "r", encoding = "utf-8") as f:
            package_dict = json.load(f, object_hook = collections.OrderedDict)
    except Exception as e:
        msg = "Failed to load '{file_path}' as a JSON file. This file will be ignored Exception: {e}"\
            .format(file_path = file_path, e = e)
        sys.stderr.write(msg + os.linesep)
        return

    for entry in entries:
        if entry in package_dict:
            del package_dict[entry]
            print("[INFO] Remove entry [{entry}] from [{file_path}]".format(file_path = file_path, entry = entry))

    try:
        # Force Unix newlines so the output is stable across platforms.
        with open(file_path, "w", encoding = "utf-8", newline = "\n") as f:
            json.dump(package_dict, f, indent = 4)
    except Exception as e:
        msg = "Failed to write '{file_path}' as a JSON file. Exception: {e}".format(file_path = file_path, e = e)
        raise IOError(msg)
def main() -> None:
    """Parse command-line arguments and strip the given entries from every JSON file found."""
    parser = argparse.ArgumentParser("mod_bundled_packages_json")
    parser.add_argument("-d", "--dir", dest = "work_dir",
                        help = "The directory to look for bundled packages JSON files, recursively.")
    parser.add_argument("entries", metavar = "ENTRIES", type = str, nargs = "+")
    args = parser.parse_args()

    json_file_list = find_json_files(args.work_dir)
    for json_file_path in json_file_list:
        remove_entries_from_json_file(json_file_path, args.entries)
|
cescudero/ptavi-p2 | calcplusplus.py | Python | gpl-2.0 | 1,398 | 0.001432 | #!/usr/bin/python3
# -*- coding: utf-8 -*-ç
import sys
import calcoo
import calcoohija
import csv
if __name__ == "__main__":
    calc = calcoohija.CalculadoraHija()
    # Map each CSV operation keyword to the calculator method implementing it;
    # this replaces four near-identical if/elif branches.
    operaciones = {
        "suma": calc.suma,
        "resta": calc.resta,
        "multiplica": calc.producto,
        "divide": calc.division,
    }
    with open(sys.argv[1]) as fichero:
        reader = csv.reader(fichero)
        for operandos in reader:
            operacion = operandos[0]
            metodo = operaciones.get(operacion)
            if metodo is None:
                # Unknown operations were silently ignored by the original code.
                continue
            # Fold the method pairwise over the operands, left to right.
            resultado = metodo(int(operandos[1]), int(operandos[2]))
            for numero in operandos[3:]:
                resultado = metodo(int(resultado), int(numero))
            print(resultado)
dmangot/devops-certifyme | gendocert.py | Python | mit | 2,175 | 0.005057 | #!/usr/bin/env python
import sys
from httplib import HTTPConnection
from urllib import urlencode
from urlparse import urljoin
from json import loads
from reportlab.pdfgen import canvas
OUTPUTFILE = 'certificate.pdf'
def get_brooklyn_integer():
    ''' Ask Brooklyn Integers for a single integer.

        Returns the integer value from the service's JSON response.
        Raises Exception on a non-2XX HTTP response.
        From: https://github.com/migurski/ArtisinalInts/
    '''
    body = 'method=brooklyn.integers.create'
    head = {'Content-Type': 'application/x-www-form-urlencoded'}
    conn = HTTPConnection('api.brooklynintegers.com', 80)
    conn.request('POST', '/rest/', body, head)
    resp = conn.getresponse()

    # NOTE(review): range(200, 299) excludes status 299 — presumably harmless,
    # but confirm the intended bound before tightening.
    if resp.status not in range(200, 299):
        raise Exception('Non-2XX response code from Brooklyn: %d' % resp.status)

    data = loads(resp.read())
    value = data['integer']
    return value
def draw_pdf(sparklydevop):
    """Render the certificate PDF (OUTPUTFILE) for the given recipient name.

    Draws the certificate template image, the recipient's name centered at 3/4
    height, and a certificate number fetched from Brooklyn Integers.
    """
    certimage = './devops.cert.png'
    # TODO make this a function of image size
    width = 1116
    height = 1553
    # Times Roman better fits the other fonts on the template
    font_name = "Times-Roman"
    # TODO make font size a function of name length
    font_size = 72
    c = canvas.Canvas(OUTPUTFILE, pagesize=(width, height))
    c.setFont(font_name, font_size)
    # Print Name
    # Measure the rendered name width so it can be horizontally centered.
    name_offset = c.stringWidth(sparklydevop)
    try:
        c.drawImage(certimage, 1, 1)
    except IOError:
        print "I/O error trying to open %s" % certimage
    else:
        # Only draw the name when the template image loaded successfully.
        c.drawString((width-name_offset)/2, height*3/4, sparklydevop)
    # Print Certificate Number
    # NOTE(review): this performs a network call per certificate — confirm acceptable.
    cert_number = "Certificate No. " + str(get_brooklyn_integer())
    cert_offset = c.stringWidth(cert_number)
    c.drawString((width-cert_offset)/2, height*3/4-font_size*2, cert_number)
    c.showPage()
    # TODO check for write permissions/failure
    try:
        c.save()
    except IOError:
        print "I/O error trying to save %s" % OUTPUTFILE
print "I/O error trying to save %s" % OUTPUTFILE
if __name__ == "__main__":
    # Expect exactly one argument: the recipient's full name, quoted.
    if len(sys.argv) != 2:
        print 'Usage: gendocert.py "Firstname Lastname"'
        sys.exit(1)
    else:
        # TODO if this is run as a CGI need to sanitize input
        draw_pdf(sys.argv[1])
|
allenai/allennlp | tests/modules/seq2seq_encoders/pytorch_seq2seq_wrapper_test.py | Python | apache-2.0 | 8,239 | 0.002913 | import numpy
from numpy.testing import assert_almost_equal
import pytest
import torch
from torch.nn import LSTM, GRU
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
class TestPytorchSeq2SeqWrapper(AllenNlpTestCase):
    def test_get_dimension_is_correct(self):
        """Input/output dims reflect the wrapped RNN; bidirectionality doubles the output dim."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)
        assert encoder.get_output_dim() == 14
        assert encoder.get_input_dim() == 2
        lstm = LSTM(
            bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True
        )
        encoder = PytorchSeq2SeqWrapper(lstm)
        assert encoder.get_output_dim() == 7
        assert encoder.get_input_dim() == 2
    def test_forward_works_even_with_empty_sequences(self):
        """Fully-masked (zero-length) batch rows must come back as all-zero outputs."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)

        tensor = torch.rand([5, 7, 3])
        tensor[1, 6:, :] = 0
        tensor[2, :, :] = 0
        tensor[3, 2:, :] = 0
        tensor[4, :, :] = 0
        mask = torch.ones(5, 7).bool()
        mask[1, 6:] = False
        mask[2, :] = False  # row 2: zero-length sequence
        mask[3, 2:] = False
        mask[4, :] = False  # row 4: zero-length sequence

        results = encoder(tensor, mask)

        # Rows with at least one unmasked timestep should be non-zero...
        for i in (0, 1, 3):
            assert not (results[i] == 0.0).data.all()
        # ...while fully-masked rows must be exactly zero.
        for i in (2, 4):
            assert (results[i] == 0.0).data.all()
    def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):
        """With no mask, the wrapper's output must equal the raw LSTM output."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)
        input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
        lstm_output = lstm(input_tensor)
        encoder_output = encoder(input_tensor, None)
        assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy())
    def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):
        """Masked forward must match running the LSTM on an explicitly packed sequence."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)

        input_tensor = torch.rand([5, 7, 3])
        # Zero out the padded tail of each row to mirror the mask below.
        input_tensor[1, 6:, :] = 0
        input_tensor[2, 4:, :] = 0
        input_tensor[3, 2:, :] = 0
        input_tensor[4, 1:, :] = 0
        mask = torch.ones(5, 7).bool()
        mask[1, 6:] = False
        mask[2, 4:] = False
        mask[3, 2:] = False
        mask[4, 1:] = False

        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        packed_sequence = pack_padded_sequence(
            input_tensor, sequence_lengths.data.tolist(), batch_first=True
        )
        lstm_output, _ = lstm(packed_sequence)
        encoder_output = encoder(input_tensor, mask)
        lstm_tensor, _ = pad_packed_sequence(lstm_output, batch_first=True)
        assert_almost_equal(encoder_output.data.numpy(), lstm_tensor.data.numpy())
    def test_forward_pulls_out_correct_tensor_for_unsorted_batches(self):
        """The wrapper must sort by length internally and restore the original batch order."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)

        input_tensor = torch.rand([5, 7, 3])
        # Lengths are deliberately NOT in decreasing batch order here.
        input_tensor[0, 3:, :] = 0
        input_tensor[1, 4:, :] = 0
        input_tensor[2, 2:, :] = 0
        input_tensor[3, 6:, :] = 0
        mask = torch.ones(5, 7).bool()
        mask[0, 3:] = False
        mask[1, 4:] = False
        mask[2, 2:] = False
        mask[3, 6:] = False

        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(
            input_tensor, sequence_lengths
        )
        packed_sequence = pack_padded_sequence(
            sorted_inputs, sorted_sequence_lengths.data.tolist(), batch_first=True
        )
        lstm_output, _ = lstm(packed_sequence)
        encoder_output = encoder(input_tensor, mask)
        lstm_tensor, _ = pad_packed_sequence(lstm_output, batch_first=True)
        # Compare against the manually sorted/packed result, restored to input order.
        assert_almost_equal(
            encoder_output.data.numpy(),
            lstm_tensor.index_select(0, restoration_indices).data.numpy(),
        )
    def test_forward_does_not_compress_tensors_padded_to_greater_than_the_max_sequence_length(self):
        """Output must keep the full padded length even when the last timestep is all-masked."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)
        input_tensor = torch.rand([5, 8, 3])
        input_tensor[:, 7, :] = 0
        mask = torch.ones(5, 8).bool()
        mask[:, 7] = False
        encoder_output = encoder(input_tensor, mask)
        assert encoder_output.size(1) == 8
    def test_wrapper_raises_if_batch_first_is_false(self):
        """The wrapper only supports batch-first RNNs and must reject others."""
        with pytest.raises(ConfigurationError):
            lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)
            _ = PytorchSeq2SeqWrapper(lstm)
    def test_wrapper_works_when_passed_state_with_zero_length_sequences(self):
        """Passing an explicit initial state must not break on zero-length rows."""
        lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
        encoder = PytorchSeq2SeqWrapper(lstm)
        input_tensor = torch.rand([5, 7, 3])
        mask = torch.ones(5, 7).bool()
        mask[0, 3:] = False
        mask[1, 4:] = False
        mask[2, 0:] = False  # row 2: zero-length sequence
        mask[3, 6:] = False
        # Initial states are of shape (num_layers * num_directions, batch_size, hidden_dim)
        initial_states = torch.randn(6, 5, 7), torch.randn(6, 5, 7)
        _ = encoder(input_tensor, mask, initial_states)
def test_wrapper_can_call_backward_with_zero_length_sequences(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 0:] = 0 # zero length False
mask[3, 6:] = False
output = encoder(input_tensor, mask)
output.sum().backward()
def test_wrapper_stateful(self):
lstm = LSTM(bidirectional=True, num_layers=2, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(lstm, stateful=True)
# To test the stateful functionality we need to call the encoder multiple times.
# Different batch sizes further tests some of the logic.
batch_sizes = [5, 10, 8]
| sequence_lengths = [4, 6, 7]
states = []
for batch_size, sequence_length in zip(batch_sizes, sequence_lengths):
tensor = torch.rand([batch_size, sequence_length, 3])
mas | k = torch.ones(batch_size, sequence_length).bool()
mask.data[0, 3:] = 0
encoder_output = encoder(tensor, mask)
states.append(encoder._states)
# Check that the output is masked properly.
assert_almost_equal(encoder_output[0, 3:, :].data.numpy(), numpy.zeros((4, 14)))
for k in range(2):
assert_almost_equal(
states[-1][k][:, -2:, :].data.numpy(), states[-2][k][:, -2:, :].data.numpy()
)
def test_wrapper_stateful_single_state_gru(self):
gru = GRU(bidirectional=True, num_layers=2, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2SeqWrapper(gru, stateful=True)
batch_sizes = [10, 5]
states = []
for batch_size in batch_sizes:
tensor = torch.rand([batch_size, 5, 3])
mask = torch.ones(batch_size, 5).bool()
mask.data[0, 3:] = 0
encoder_output = encoder(tensor, mask)
states.append(encoder._states)
assert_almost_equal(encoder_output[0, 3:, :].data.numpy(), numpy.zeros((2, 14)))
assert_almost_equal(
states[-1][0][:, -5:, :].dat |
ShengRang/c4f | leetcode/rotate-list.py | Python | gpl-3.0 | 617 | 0.027553 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def rotateRight(self, head, k):
        """Rotate a singly-linked list right by k places.

        Collects the values, rotates the value list, and rebuilds the list
        from new ListNode instances (node identity is not preserved).

        :type head: ListNode
        :type k: int
        :rtype: ListNode
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        if not values:
            return None
        # Normalize k; rotating by the list length is a no-op.
        k = k % len(values)
        if k:
            values = values[-k:] + values[:-k]
        # Rebuild the list back-to-front so each new node points at its successor.
        rebuilt = None
        while values:
            new_node = ListNode(values.pop())
            new_node.next = rebuilt
            rebuilt = new_node
        return rebuilt
|
dracos/django | django/shortcuts.py | Python | bsd-3-clause | 5,580 | 0.001613 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.functional import Promise
def render_to_response(template_name, context=None, content_type=None, status=None, using=None):
    """
    Return a HttpResponse whose content is filled with the result of calling
    django.template.loader.render_to_string() with the passed arguments.

    Deprecated since Django 2.0; use render() instead.
    """
    warnings.warn(
        # Fixed wording: "in favor of render()" (original said "in favor render()").
        'render_to_response() is deprecated in favor of render(). It has the '
        'same signature except that it also requires a request.',
        RemovedInDjango30Warning, stacklevel=2,
    )
    content = loader.render_to_string(template_name, context, using=using)
    return HttpResponse(content, content_type, status)
def render(request, template_name, context=None, content_type=None, status=None, using=None):
    """
    Render *template_name* with *context* for *request* and wrap the result in
    an HttpResponse with the given content type and status.
    """
    rendered = loader.render_to_string(template_name, context, request, using=using)
    return HttpResponse(rendered, content_type, status)
def redirect(to, *args, permanent=False, **kwargs):
    """
    Return an HttpResponseRedirect to the appropriate URL for the arguments
    passed.

    The arguments could be:

        * A model: the model's `get_absolute_url()` function will be called.

        * A view name, possibly with arguments: `urls.reverse()` will be used
          to reverse-resolve the name.

        * A URL, which will be used as-is for the redirect location.

    Issues a temporary redirect by default; pass permanent=True to issue a
    permanent redirect.
    """
    if permanent:
        response_class = HttpResponsePermanentRedirect
    else:
        response_class = HttpResponseRedirect
    return response_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
    """
    Return a QuerySet or a Manager.

    Duck typing in action: any class with a `get()` method (for
    get_object_or_404) or a `filter()` method (for get_list_or_404) might do
    the job.
    """
    # Anything exposing ._default_manager (model classes and friends) is asked
    # for its full queryset; everything else is returned untouched.
    _missing = object()
    manager = getattr(klass, '_default_manager', _missing)
    if manager is _missing:
        return klass
    return manager.all()
def get_object_or_404(klass, *args, **kwargs):
    """
    Use get() to return an object, or raise a Http404 exception if the object
    does not exist.

    klass may be a Model, Manager, or QuerySet object. All other passed
    arguments and keyword arguments are used in the get() query.

    Note: Like with get(), an MultipleObjectsReturned will be raised if more than one
    object is found.
    """
    queryset = _get_queryset(klass)
    try:
        return queryset.get(*args, **kwargs)
    except AttributeError:
        # klass had no usable .get(): report a helpful type name.
        klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
        raise ValueError(
            "First argument to get_object_or_404() must be a Model, Manager, "
            "or QuerySet, not '%s'." % klass__name
        )
    except queryset.model.DoesNotExist:
        raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
    """
    Use filter() to return a list of objects, or raise a Http404 exception if
    the list is empty.

    klass may be a Model, Manager, or QuerySet object. All other passed
    arguments and keyword arguments are used in the filter() query.
    """
    queryset = _get_queryset(klass)
    try:
        matches = list(queryset.filter(*args, **kwargs))
    except AttributeError:
        # klass had no usable .filter(): report a helpful type name.
        if isinstance(klass, type):
            klass__name = klass.__name__
        else:
            klass__name = klass.__class__.__name__
        raise ValueError(
            "First argument to get_list_or_404() must be a Model, Manager, or "
            "QuerySet, not '%s'." % klass__name
        )
    if not matches:
        raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
    return matches
def resolve_url(to, *args, **kwargs):
    """
    Return a URL appropriate for the arguments passed.

    The arguments could be:

        * A model: the model's `get_absolute_url()` function will be called.

        * A view name, possibly with arguments: `urls.reverse()` will be used
          to reverse-resolve the name.

        * A URL, which will be returned as-is.
    """
    # If it's a model, use get_absolute_url()
    if hasattr(to, 'get_absolute_url'):
        return to.get_absolute_url()

    if isinstance(to, Promise):
        # Expand the lazy instance, as it can cause issues when it is passed
        # further to some Python functions like urlparse.
        to = str(to)

    if isinstance(to, str):
        # Handle relative URLs
        if to.startswith(('./', '../')):
            return to

    # Next try a reverse URL resolution.
    try:
        return reverse(to, args=args, kwargs=kwargs)
    except NoReverseMatch:
        # If this is a callable, re-raise.
        if callable(to):
            raise
        # If this doesn't "feel" like a URL, re-raise.
        if '/' not in to and '.' not in to:
            raise

    # Finally, fall back and assume it's a URL
    return to
faizan-barmawer/openstack_ironic | ironic/drivers/modules/drac/client.py | Python | apache-2.0 | 4,315 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrapper for pywsman.Client
"""
from xml.etree import ElementTree
from oslo.utils import importutils
from ironic.common import exception
pywsman = importutils.try_import('pywsman')
_SOAP_ENVELOPE_URI = 'http://www.w3.org/2003/05/soap-envelope'
# Filter Dialects, see (Section 2.3.1):
# http://en.community.dell.com/techcenter/extras/m/white_papers/20439105.aspx
_FILTER_DIALECT_MAP = {'cql': 'http://schemas.dmtf.org/wbem/cql/1/dsp0202.pdf',
'wql': 'http://schemas.microsoft.com/wbem/wsman/1/WQL'}
class Client(object):
    """Thin wrapper around pywsman.Client for talking to a DRAC management card."""

    def __init__(self, drac_host, drac_port, drac_path, drac_protocol,
                 drac_username, drac_password):
        pywsman_client = pywsman.Client(drac_host, drac_port, drac_path,
                                        drac_protocol, drac_username,
                                        drac_password)
        # TODO(ifarkas): Add support for CACerts
        pywsman.wsman_transport_set_verify_peer(pywsman_client, False)

        self.client = pywsman_client

    def wsman_enumerate(self, resource_uri, options, filter_query=None,
                        filter_dialect='cql'):
        """Enumerates a remote WS-Man class.

        :param resource_uri: URI of the resource.
        :param options: client options.
        :param filter_query: the query string.
        :param filter_dialect: the filter dialect. Valid options are:
                               'cql' and 'wql'. Defaults to 'cql'.
        :raises: DracClientError on an error from pywsman library.
        :raises: DracInvalidFilterDialect if an invalid filter dialect
                 was specified.
        :returns: an ElementTree object of the response received.
        """
        filter_ = None
        if filter_query is not None:
            try:
                filter_dialect = _FILTER_DIALECT_MAP[filter_dialect]
            except KeyError:
                valid_opts = ', '.join(_FILTER_DIALECT_MAP)
                raise exception.DracInvalidFilterDialect(
                    invalid_filter=filter_dialect, supported=valid_opts)

            filter_ = pywsman.Filter()
            filter_.simple(filter_dialect, filter_query)

        # Page through results 100 elements at a time.
        options.set_flags(pywsman.FLAG_ENUMERATION_OPTIMIZATION)
        options.set_max_elements(100)

        doc = self.client.enumerate(options, filter_, resource_uri)
        root = self._get_root(doc)

        final_xml = root
        find_query = './/{%s}Body' % _SOAP_ENVELOPE_URI
        insertion_point = final_xml.find(find_query)
        # Keep pulling while the enumeration context is open, merging each
        # page's Body children into the first response document.
        while doc.context() is not None:
            doc = self.client.pull(options, None, resource_uri,
                                   str(doc.context()))
            root = self._get_root(doc)
            for result in root.findall(find_query):
                for child in list(result):
                    insertion_point.append(child)

        return final_xml

    def wsman_invoke(self, resource_uri, options, method):
        """Invokes a remote WS-Man method.

        :param resource_uri: URI of the resource.
        :param options: client options.
        :param method: name of the method to invoke.
        :raises: DracClientError on an error from pywsman library.
        :returns: an ElementTree object of the response received.
        """
        doc = self.client.invoke(options, resource_uri, method)
        return self._get_root(doc)

    def _get_root(self, doc):
        """Return the parsed root of *doc*, raising DracClientError on failure."""
        if doc is None or doc.root() is None:
            raise exception.DracClientError(
                last_error=self.client.last_error(),
                fault_string=self.client.fault_string(),
                response_code=self.client.response_code())
        root = doc.root()
        return ElementTree.fromstring(root.string())
|
avian2/unidecode | unidecode/x077.py | Python | gpl-2.0 | 4,673 | 0.054783 | data = (
'Ming ', # 0x00
'Sheng ', # 0x01
'Shi ', # 0x02
'Yun ', # 0x03
'Mian ', # 0x04
'Pan ', # 0x05
'Fang ', # 0x06
'Miao ', # 0x07
'Dan ', # 0x08
'Mei ', # 0x09
'Mao ', # 0x0a
'Kan ', # 0x0b
'Xian ', # 0x0c
'Ou ', # 0x0d
'Shi ', # 0x0e
'Yang ', # 0x0f
'Zheng ', # 0x10
'Yao ', # 0x11
'Shen ', # 0x12
'Huo ', # 0x13
'Da ', # 0x14
'Zhen ', # 0x15
'Kuang ', # 0x16
'Ju ', # 0x17
'Shen ', # 0x18
'Chi ', # 0x19
'Sheng ', # 0x1a
'Mei ', # 0x1b
'Mo ', # 0x1c
'Zhu ', # 0x1d
'Zhen ', # 0x1e
'Zhen ', # 0x1f
'Mian ', # 0x20
'Di ', # 0x21
'Yuan ', # 0x22
'Die ', # 0x23
'Yi ', # 0x24
'Zi ', # 0x25
'Zi ', # 0x26
'Chao ', # 0x27
'Zha ', # 0x28
'Xuan ', # 0x29
'Bing ', # 0x2a
'Mi ', # 0x2b
'Long ', # 0x2c
'Sui ', # 0x2d
'Dong ', # 0x2e
'Mi ', # 0x2f
'Die ', # 0x30
'Yi ', # 0x31
'Er ', # 0x32
'Ming ', # 0x33
'Xuan ', # 0x34
'Chi ', # 0x35
'Kuang ', # 0x36
'Juan ', # 0x37
'Mou ', # 0x38
'Zhen ', # 0x39
'Tiao ', # 0x3a
'Yang ', # 0x3b
'Yan ', # 0x3c
'Mo ', # 0x3d
'Zhong ', # 0x3e
'Mai ', # 0x3f
'Zhao ', # 0x40
'Zheng ', # 0x41
'Mei ', # 0x42
'Jun ', # 0x43
'Shao ', # 0x44
'Han ', # 0x45
'Huan ', # 0x46
'Di ', # 0x47
'Cheng ', # 0x48
'Cuo ', # 0x49
'Juan ', # 0x4a
'E ', # 0x4b
'Wan ', # 0x4c
'Xian ', # 0x4d
'Xi ', # 0x4e
'Kun ', # 0x4f
'Lai ', # 0x50
'Jian ', # 0x51
'Shan ', # 0x52
'Tian ', # 0x53
'Hun ', # 0x54
'Wan ', # 0x55
'Ling ', # 0x56
'Shi ', # 0x57
'Qiong ', # 0x58
'Lie ', # 0x59
'Yai ', # 0x5a
'Jing ', # 0x5b
'Zheng ', # 0x5c
'Li ', # 0x5d
'Lai ', # 0x5e
'Sui ', # 0x5f
'Juan ', # 0x60
'Shui ', # 0x61
'Sui ', # 0x62
'Du ', # 0x63
'Bi ', # 0x64
'Bi ', # 0x65
'Mu ', # 0x66
'Hun ', # 0x67
'Ni ', # 0x68
'Lu ', # 0x69
'Yi ', # 0x6a
'Jie ', # 0x6b
'Cai ', # 0x6c
'Zhou ', # 0x6d
'Yu ', # 0x6e
'Hun ', # 0x6f
'Ma ', # 0x70
'Xia ', # 0x71
'Xing ', # 0x72
'Xi ', # 0x73
'Gun ', # 0x74
'Cai ', # 0x75
'Chun ', # 0x76
'Jian ', # 0x77
'Mei ', # 0x78
'Du ', # 0x79
'Hou ', # 0x7a
'Xuan ', # 0x7b
'Ti ', # 0x7c
'Kui ', # 0x7d
'Gao ', # 0x7e
'Rui ', # 0x7f
'Mou ', # 0x80
'Xu ', # 0x81
'Fa ', # 0x82
'Wen ', # 0x83
'Miao ', # 0x84
'Chou ', # 0x85
'Kui ', # 0x86
'Mi ', # 0x87
'Weng ', # 0x88
'Kou ', # 0x89
'Dang ', # 0x8a
'Chen ', # 0x8b
'Ke ', # 0x8c
'Sou ', # 0x8d
'Xia ', # 0x8e
'Qiong ', # 0x8f
'Mao ', # 0x90
'Ming ', # 0x91
'Man ', # 0x92
'Shui ', # 0x93
'Ze ', # 0x94
'Zhang ', # 0x95
'Yi ', # 0x96
'Diao ', # 0x97
'Ou ', # 0x98
'Mo ', # 0x99
'Shun ', # 0x9a
'Cong ', # 0x9b
'Lou ', # 0x9c
'Chi ', # 0x9d
'Man ', # 0x9e
'Piao ', # 0x9f
'Cheng ', # 0xa0
'Ji ', # 0xa1
'Meng ', # 0xa2
None, # 0xa3
'Run ', # 0xa4
'Pie ', # 0xa5
'Xi ', # 0xa6
'Qiao ', # 0xa7
'Pu ', # 0xa8
'Zhu ', # 0xa9
'Deng ', # 0xaa
'Shen ', # 0xab
'Shun ', # 0xac
'Liao ', # 0xad
'Che ', # 0xae
'Xian ', # 0xaf
'Kan ', # 0xb0
'Ye ', # 0xb1
'Xu ', # 0xb2
'Tong ', # 0xb3
'Mou ', # 0xb4
'Lin ', # 0xb5
'Kui ', # 0xb6
'Xian ', # 0xb7
'Ye ', # 0xb8
'Ai ', # 0xb9
'Hui ', # 0xba
'Zhan ', # 0xbb
'Jian ', # 0xbc
'Gu ', # 0xbd
'Zhao ', # 0xbe
'Qu ', # 0xbf
'Wei ', # 0xc0
'Chou ', # 0xc1
'Sao ', # 0xc2
'Ning ', # 0xc3
'Xun ', # 0xc4
'Yao ', # 0xc5
'Huo ', # 0xc6
'Meng ', # 0xc7
'Mian ', # 0xc8
'Bin ', # 0xc9
'Mian ', # 0xca
'Li ', # 0xcb
'Kuang ', # 0xcc
'Jue ', # 0xcd
'Xuan ', # 0xce
'Mian ', # 0xcf
'Huo ', # 0xd0
'Lu ', # 0xd1
'Meng ', # 0xd2
'Long ', # 0xd3
'Guan ', # 0xd4
'Man ', # 0xd5
'Xi ', # 0xd6
'Chu ', # 0xd7
'Tang ', # 0xd8
'Kan ', # 0xd9
'Zhu ', # 0xda
'Mao ', # 0xdb
'Jin ', # 0xdc
'Lin ', # 0xdd
'Yu ', # 0xde
'Shuo ', # 0xdf
'Ce ', # 0xe0
'Jue ', # 0xe1
'Shi ', # 0xe2
'Yi ', # 0xe3
'Shen ', # 0xe4
'Zhi ', # 0xe5
'Hou ', # 0xe6
'Shen ', # 0xe7
'Ying ', # 0xe8
'Ju ', # 0xe9
'Zhou ', # 0xea
'Jiao ', # 0xeb
'Cuo ', # 0xec
'Duan ', # 0xed
'Ai ', # 0xee
'Jiao ', # 0xef
'Zeng ', # 0xf0
'Huo ', # 0xf1
'Bai ', # 0xf2
'Shi ', # 0xf3
'Ding ', # 0xf4
'Qi ', # 0xf5
'Ji ', # 0xf6
'Zi ', # 0xf7
'Gan ', # 0xf8
'Wu ', # 0xf9
'Tuo ', # 0xfa
'Ku ', # 0xfb
'Qiang ', # 0xfc
'Xi ', # 0xfd
'Fan ', # 0xfe
'Kuang ', # 0xff
)
|
Elchi3/kuma | kuma/scrape/fixture.py | Python | mpl-2.0 | 10,807 | 0.000833 | """Load test fixtures from a specification."""
import logging
from django.apps import apps
from django.contrib.auth.hashers import make_password
logger = logging.getLogger("kuma.scraper")
class FixtureLoader(object):
"""Load fixtures into the current database."""
# Needed information about the supported Django models for fixtures
# The key is the app_label.model_name, such as wiki.revision for Revision:
# Revision._meta.app_label == 'wiki'
# Revision._meta.model_name == 'revision'
# The value is a dictionary of:
# - natural_key: Properties used to find existing database records
# - relations: Details of properties that are foreign keys
# - filters: Methods to run on values before saving to the database
model_metadata = {
"account.emailaddress": {
"natural_key": ("email",),
"relations": {"user": {"link": "to_one", "resource": "users.user"}},
},
"auth.group": {
"natural_key": ("name",),
"relations": {
"permissions": {"link": "to_many", "resource": "auth.permission"},
},
},
"auth.permission": {
"natural_key": ("codename",),
"relations": {
"content_type": {
"link": "to_one",
"resource": "contenttypes.contenttype",
},
},
},
"contenttypes.contenttype": {"natural_key": ("app_label", "model")},
"database.constance": {"natural_key": ("key",)},
"feeder.bundle": {
"natural_key": ("shortname",),
"relations": {"feeds": {"link": "to_many", "resource": "feeder.feed"}},
},
"feeder.feed": {"natural_key": ("shortname",)},
"search.filter": {
"natural_key": ("name", "slug"),
"relations": {
"group": {"link": "to_one", "resource": "search.filtergroup"},
"tags": {"link": "to_many", "resource": "taggit.tag"},
},
},
"search.filtergroup": {"natural_key": ("name", "slug")},
"sites.site": {"natural_key": ("id",)},
"socialaccount.socialaccount": {
"natural_key": ("uid", "provider"),
"relations": {"user": {"link": "to_one", "resource": "users.user"}},
},
"socialaccount.socialapp": {
"natural_key": ("name",),
"relations": {"sites": {"link": "to_many", "resource": "sites.site"}},
},
"taggit.tag": {"natural_key": ("name",)},
"users.user": {
"natural_key": ("username",),
"relations": {"groups": {"link": "to_many", "resource": "auth.group"}},
"filters": {"password": "make_password"},
},
"users.userban": {
"natural_key": ("user", "by"),
"relations": {
"user": {"link": "to_one", "resource": "users.user"},
"by": {"link": "to_one", "resource": "users.user"},
},
},
"waffle.flag": {"natural_key": ("name",)},
"waffle.switch": {"natural_key": ("name",)},
}
    class NeedsDependency(Exception):
        """A fixture has an un-resolved dependency (referenced instance not created yet)."""

        pass
    def __init__(self, specification):
        """Initialize with a specification dictionary."""
        # Maps loaded fixtures to their saved model instances.
        self.instances = {}
        # Validated, normalized form of the raw specification.
        self.spec = self.parse_specification(specification)
    def parse_specification(self, specification):
        """Parse and validate the specification.

        Returns {model_id: [{"key": natural_key_tuple,
                             "fields": {...}, "relations": {...}}, ...]}.
        NOTE: pops natural-key fields out of each item dict (mutates input).
        """
        parsed = {}
        for model_id, items in specification.items():
            # Parse and validate the model
            metadata = self.model_metadata[model_id]
            natural_key_spec = metadata["natural_key"]
            relations = metadata.get("relations", {})
            filters = metadata.get("filters", {})
            # Fails fast if the model is not installed in this Django project.
            assert apps.get_model(model_id)
            parsed.setdefault(model_id, [])
            for item_num, item in enumerate(items):
                # Parse and validate the natural key
                key = []
                for name in natural_key_spec:
                    relation = relations.get(name, {})
                    try:
                        value = item.pop(name)
                    except KeyError:
                        raise ValueError(
                            '%s %d: Needs key "%s"' % (model_id, item_num, name)
                        )
                    else:
                        if relation:
                            # Natural-key components may only be to_one links;
                            # their values become hashable tuples.
                            assert relation["link"] == "to_one"
                            key.append(tuple(value))
                        else:
                            key.append(value)
                data = {
                    "key": tuple(key),
                    "fields": {},
                    "relations": {},
                }
                # Parse and validate the remaining properties
                for name, value in item.items():
                    relation = relations.get(name, {})
                    if relation:
                        if relation["link"] == "to_one":
                            data["relations"][name] = tuple(value)
                        else:
                            assert relation["link"] == "to_many"
                            data["relations"][name] = [tuple(val) for val in value]
                    elif name in filters:
                        # Filters are method names on self (e.g. make_password).
                        value_filter = getattr(self, filters[name])
                        data["fields"][name] = value_filter(value)
                    else:
                        data["fields"][name] = value
                parsed[model_id].append(data)
        return parsed
def load(self):
"""Load items until complete or progress stops."""
if not self.spec:
return
existing, loaded, pending = (0, 0, 0)
cycle = 0
while cycle == 0 or pending:
cycle += 1
last_counts = (existing, loaded, pending)
existing, loaded, pending = self.load_cycle()
logger.info(
"Fixtures cycle %d: %d existing, %d loaded," " %d pending.",
cycle,
existing,
loaded,
pending,
)
if (existing, loaded, pending) == last_counts:
raise RuntimeError("Dependency block detected.")
def load_cycle(self):
"""
Load as many items as we can this cycle.
Returns a tuple of counts:
* Existing items from previous cycles
* Items loaded this cycle
* Items that were unable to be loaded this cycle
"""
existing, loaded, pending = 0, 0, 0
for model_id, items in self.spec.items():
metadata = self.model_metadata[model_id]
Model = apps.get_model(model_id)
self.instances.setdefault(model_id, {})
for item in items:
if item["key"] in self.instances[model_id]:
existing += 1
else:
try:
instance = self.load_item(item, Model, metadata)
except self.NeedsDependency as nd:
relation, key = nd.args
| logger.debug(
"%s %s requires %s %s",
model_i | d,
item["key"],
relation["resource"],
key,
)
pending += 1
else:
self.instances[model_id][item["key"]] = instance
loaded += 1
return existing, loaded, pending
def load_item(self, item, Model, metadata):
"""
Attempt to create or update an item.
Returns the instance if attempt suceeded.
Raises NeedsDependency if a dependency must be created first.
"""
natural_key_spec = metadata["natural_key"]
relations = metadata.get("relations", {})
# Check for required relations in the natural key
for name, key in zip(natural_ |
ChristianAnthony46/PomegranateCMYK | Channel.py | Python | gpl-3.0 | 314 | 0.006369 | from PIL import Image
class Channel:
    """A single CMYK color channel backed by an all-black PIL image.

    (Reconstructed: two lines were garbled by extraction split markers.)
    """

    def __init__(self, channelLabel, size):
        self.channelLabel = channelLabel
        # (size[0], size[1]) == (width, height); start fully black.
        self.channel = Image.new("CMYK", (size[0], size[1]), "black")
        # Pixel access object for direct per-pixel reads/writes.
        self.pixelMap = self.channel.load()

    def save(self, filename):
        """Write the channel image to *filename* (format inferred by PIL)."""
        self.channel.save(filename)
TomAugspurger/pandas | pandas/tests/test_join.py | Python | bsd-3-clause | 9,296 | 0 | import numpy as np
import pytest
from pandas._libs import join as _join
from pandas import Categorical, DataFrame, Index, merge
import pandas._testing as tm
class TestIndexer:
    """Parametrized checks of outer_join_indexer over several dtypes."""
    @pytest.mark.parametrize(
        "dtype", ["int32", "int64", "float32", "float64", "object"]
    )
    def test_outer_join_indexer(self, dtype):
        indexer = _join.outer_join_indexer
        # left = [0, 1, 2], right = [2, 3, 4] -> union [0..4]
        left = np.arange(3, dtype=dtype)
        right = np.arange(2, 5, dtype=dtype)
        empty = np.array([], dtype=dtype)
        result, lindexer, rindexer = indexer(left, right)
        assert isinstance(result, np.ndarray)
        assert isinstance(lindexer, np.ndarray)
        assert isinstance(rindexer, np.ndarray)
        tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
        # -1 marks positions with no match on that side.
        exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(lindexer, exp)
        exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
        tm.assert_numpy_array_equal(rindexer, exp)
        # Empty left side: result equals right, all left positions unmatched.
        result, lindexer, rindexer = indexer(empty, right)
        tm.assert_numpy_array_equal(result, right)
        exp = np.array([-1, -1, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(lindexer, exp)
        exp = np.array([0, 1, 2], dtype=np.int64)
        tm.assert_numpy_array_equal(rindexer, exp)
        # Empty right side: symmetric case.
        result, lindexer, rindexer = indexer(left, empty)
        tm.assert_numpy_array_equal(result, left)
        exp = np.array([0, 1, 2], dtype=np.int64)
        tm.assert_numpy_array_equal(lindexer, exp)
        exp = np.array([-1, -1, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(rindexer, exp)
def test_left_join_indexer_unique():
    # Duplicated left keys joined against a unique right side: each left
    # value maps to the position of its key in the unique array.
    right_unique = np.array([1, 2, 3, 4, 5], dtype=np.int64)
    left_dups = np.array([2, 2, 3, 4, 4], dtype=np.int64)
    indexer = _join.left_join_indexer_unique(left_dups, right_unique)
    tm.assert_numpy_array_equal(
        indexer, np.array([1, 1, 2, 3, 3], dtype=np.int64)
    )
def test_left_outer_join_bug():
    # Regression check: left_outer_join with sort=False must keep the
    # original left ordering and match only keys present on the right.
    left = np.array(
        [0, 1, 0, 1, 1, 2, 3, 1, 0, 2, 1, 2, 0, 1, 1, 2, 3, 2, 3, 2,
         1, 1, 3, 0, 3, 2, 3, 0, 0, 2, 3, 2, 0, 3, 1, 3, 0, 1, 3, 0,
         0, 1, 0, 3, 1, 0, 1, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 3, 1, 2,
         0, 0, 3, 1, 3, 2, 2, 0, 1, 3, 0, 2, 3, 2, 3, 3, 2, 3, 3, 1,
         3, 2, 0, 0, 3, 1, 1, 1, 0, 2, 3, 3, 1, 2, 0, 3, 1, 2, 0, 2],
        dtype=np.int64,
    )
    right = np.array([3, 1], dtype=np.int64)
    max_groups = 4
    lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False)
    # Left indexer is the identity; right indexer points at the matching
    # right position (1 -> index 1, 3 -> index 0) or -1 for no match.
    exp_lidx = np.arange(len(left), dtype=np.int64)
    exp_ridx = -np.ones(len(left), dtype=np.int64)
    exp_ridx[left == 1] = 1
    exp_ridx[left == 3] = 0
    tm.assert_numpy_array_equal(lidx, exp_lidx)
    tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_inner_join_indexer():
    # Only the overlapping keys (3 and 5) survive an inner join.
    lhs = np.array([1, 2, 3, 4, 5], dtype=np.int64)
    rhs = np.array([0, 3, 5, 7, 9], dtype=np.int64)
    joined, lhs_idx, rhs_idx = _join.inner_join_indexer(lhs, rhs)
    tm.assert_almost_equal(joined, np.array([3, 5], dtype=np.int64))
    tm.assert_almost_equal(lhs_idx, np.array([2, 4], dtype=np.int64))
    tm.assert_almost_equal(rhs_idx, np.array([1, 2], dtype=np.int64))
    # Degenerate case: single identical element on both sides.
    one_l = np.array([5], dtype=np.int64)
    one_r = np.array([5], dtype=np.int64)
    joined, lhs_idx, rhs_idx = _join.inner_join_indexer(one_l, one_r)
    tm.assert_numpy_array_equal(joined, np.array([5], dtype=np.int64))
    tm.assert_numpy_array_equal(lhs_idx, np.array([0], dtype=np.int64))
    tm.assert_numpy_array_equal(rhs_idx, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
    # Union of keys; -1 marks positions with no match on that side.
    lhs = np.array([1, 2, 3, 4, 5], dtype=np.int64)
    rhs = np.array([0, 3, 5, 7, 9], dtype=np.int64)
    joined, lhs_idx, rhs_idx = _join.outer_join_indexer(lhs, rhs)
    tm.assert_almost_equal(
        joined, np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
    )
    tm.assert_almost_equal(
        lhs_idx, np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
    )
    tm.assert_almost_equal(
        rhs_idx, np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
    )
    # Degenerate case: single identical element on both sides.
    one_l = np.array([5], dtype=np.int64)
    one_r = np.array([5], dtype=np.int64)
    joined, lhs_idx, rhs_idx = _join.outer_join_indexer(one_l, one_r)
    tm.assert_numpy_array_equal(joined, np.array([5], dtype=np.int64))
    tm.assert_numpy_array_equal(lhs_idx, np.array([0], dtype=np.int64))
    tm.assert_numpy_array_equal(rhs_idx, np.array([0], dtype=np.int64))
def test_left_join_indexer():
    """Left join keeps all left keys; unmatched right positions are -1.

    (Line asserting `ares` in the single-element case was garbled by
    extraction split markers and has been reconstructed.)
    """
    a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
    b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
    index, ares, bres = _join.left_join_indexer(a, b)
    tm.assert_almost_equal(index, a)
    aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
    bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
    tm.assert_almost_equal(ares, aexp)
    tm.assert_almost_equal(bres, bexp)
    # Degenerate case: single identical element on both sides.
    a = np.array([5], dtype=np.int64)
    b = np.array([5], dtype=np.int64)
    index, ares, bres = _join.left_join_indexer(a, b)
    tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
    tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
    tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
    # Left side has duplicates on the right-hand key (1 appears twice).
    keys = Index([1, 1, 2, 5])
    other = Index([1, 2, 5, 7, 9])
    joined, lidx, ridx = _join.left_join_indexer(other.values, keys.values)
    tm.assert_almost_equal(
        joined, np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
    )
    tm.assert_almost_equal(
        lidx, np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
    )
    tm.assert_almost_equal(
        ridx, np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
    )
def test_outer_join_indexer2():
    # With these inputs the outer join matches the left join exactly.
    keys = Index([1, 1, 2, 5])
    other = Index([1, 2, 5, 7, 9])
    joined, lidx, ridx = _join.outer_join_indexer(other.values, keys.values)
    tm.assert_almost_equal(
        joined, np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
    )
    tm.assert_almost_equal(
        lidx, np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
    )
    tm.assert_almost_equal(
        ridx, np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
    )
def test_inner_join_indexer2():
    # Inner join drops the left-only keys 7 and 9.
    keys = Index([1, 1, 2, 5])
    other = Index([1, 2, 5, 7, 9])
    joined, lidx, ridx = _join.inner_join_indexer(other.values, keys.values)
    tm.assert_almost_equal(joined, np.array([1, 1, 2, 5], dtype=np.int64))
    tm.assert_almost_equal(lidx, np.array([0, 0, 1, 2], dtype=np.int64))
    tm.assert_almost_equal(ridx, np.array([0, 1, 2, 3], dtype=np.int64))
def test_merge_join_categorical_multiindex():
# From issue 16627
a = {
"Cat1": Categorical(["a", "b", "a", "c", "a", "b"], ["a", "b", "c"]),
"Int1": [0, 1, 0, 1, 0, 0],
}
a = DataFrame(a)
b = {
"Cat": Categorical(["a", "b", "c", "a", "b", "c"], ["a", "b", "c"]),
"Int": [0, 0, 0, 1, 1, 1],
"Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
}
b = DataFrame(b).set_index([" |
rwightman/pytorch-image-models | timm/models/convmixer.py | Python | apache-2.0 | 3,631 | 0.004682 | import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.registry import register_model
from .helpers import build_model_with_cfg
def _cfg(url='', **kwargs):
    """Base default config for ConvMixer variants; caller kwargs override."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .96, 'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head',
        'first_conv': 'stem.0',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained weight locations for each registered ConvMixer variant.
# (The third URL was garbled by extraction split markers; "download"
# reconstructed to match the other entries.)
default_cfgs = {
    'convmixer_1536_20': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1536_20_ks9_p7.pth.tar'),
    'convmixer_768_32': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_768_32_ks7_p7_relu.pth.tar'),
    'convmixer_1024_20_ks9_p14': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1024_20_ks9_p14.pth.tar')
}
class Residual(nn.Module):
    """Wrap a module with an additive skip connection: ``y = fn(x) + x``."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        out = self.fn(x)
        return out + x
class ConvMixer(nn.Module):
    """ConvMixer: patch-embedding stem, `depth` mixer blocks (depthwise conv
    with residual, then pointwise conv), global average pooling, linear head.

    (The `__init__` signature line was garbled by extraction split markers
    and has been reconstructed.)
    """
    def __init__(self, dim, depth, kernel_size=9, patch_size=7, in_chans=3, num_classes=1000, activation=nn.GELU, **kwargs):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = dim
        self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity()
        # Patch embedding: non-overlapping patch_size x patch_size convolution.
        self.stem = nn.Sequential(
            nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
            activation(),
            nn.BatchNorm2d(dim)
        )
        self.blocks = nn.Sequential(
            *[nn.Sequential(
                # Depthwise "token mixing" conv with residual connection.
                Residual(nn.Sequential(
                    nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
                    activation(),
                    nn.BatchNorm2d(dim)
                )),
                # Pointwise "channel mixing" conv.
                nn.Conv2d(dim, dim, kernel_size=1),
                activation(),
                nn.BatchNorm2d(dim)
            ) for _ in range(depth)]
        )
        self.pooling = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten()
        )
    def get_classifier(self):
        """Return the classification head module."""
        return self.head
    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the head for a new class count (Identity when 0)."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
    def forward_features(self, x):
        """Run stem + mixer blocks + pooling; returns (N, dim) features."""
        x = self.stem(x)
        x = self.blocks(x)
        x = self.pooling(x)
        return x
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
def _create_convmixer(variant, pretrained=False, **kwargs):
    # Factory: build a ConvMixer via timm's helper, attaching the variant's default cfg.
    return build_model_with_cfg(ConvMixer, variant, pretrained, default_cfg=default_cfgs[variant], **kwargs)
@register_model
def convmixer_1536_20(pretrained=False, **kwargs):
    """ConvMixer-1536/20: width 1536, depth 20, 9x9 depthwise kernels, patch size 7."""
    model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs)
    return _create_convmixer('convmixer_1536_20', pretrained, **model_args)
@register_model
def convmixer_768_32(pretrained=False, **kwargs):
    """ConvMixer-768/32: width 768, depth 32, 7x7 kernels, patch size 7, ReLU activation."""
    model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, activation=nn.ReLU, **kwargs)
    return _create_convmixer('convmixer_768_32', pretrained, **model_args)
@register_model
def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs):
    """ConvMixer-1024/20: width 1024, depth 20, 9x9 kernels, patch size 14."""
    model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs)
    return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args)
rdo-infra/ci-config | ci-scripts/infra-setup/roles/rrcockpit/files/telegraf_py3/vexxhost.py | Python | apache-2.0 | 5,854 | 0 | #!/usr/bin/env python
import argparse
import datetime
import json
import os
import re
import subprocess
import time
# This file is running on toolbox periodically
# Output filename (relative to --webdir) holding the InfluxDB line data.
FILE_PATH = 'influxdb_stats_vexx'
# Credentials file containing shell-style `export KEY=value` lines.
SECRETS = "/etc/vexxhostrc"
# Captures KEY and value from an `export KEY=value` line.
re_ex = re.compile(r"^export ([^\s=]+)=(\S+)")
def _run_cmd(cmd):
    """Run *cmd* in a shell with the vexxhost credentials exported.

    The environment is extended with the `export KEY=value` pairs read
    from SECRETS (quotes stripped from values).  Returns a tuple of
    (parsed JSON stdout or None on parse failure, captured stderr bytes).
    """
    env = os.environ.copy()
    with open(SECRETS) as f:
        creds = {}
        for line in f:
            match = re_ex.match(line)
            if match:
                key, val = match.groups()
                creds[key] = val.replace('"', '').replace("'", "")
        env.update(creds)
    # BUGFIX: stderr was not piped before, so `errs` was always None and
    # the error-reporting loop below was dead code (and would have
    # iterated over raw bytes had it ever run).
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    outs, errs = p.communicate()
    if errs:
        for err_line in errs.splitlines():
            print("ERROR %s" % err_line)
    try:
        output = json.loads(outs)
    except Exception as e:
        print("ERROR %s" % e)
        return None, errs
    return output, errs
def run_server_check():
    """Count project servers, bucketed by status and inferred role."""
    servers = _run_cmd(
        "openstack server list --project-domain "
        "4b633c451ac74233be3721a3635275e5 --long -f json")[0]
    if not servers:
        return None
    counts = {}
    for status in ('ACTIVE', 'BUILD', 'ERROR', 'DELETED'):
        counts[status] = sum(1 for srv in servers if srv['Status'] == status)
    counts['undercloud'] = sum(
        1 for srv in servers
        if srv['Flavor Name'] == 'nodepool' and "node" in srv['Name'])
    counts['multinode'] = 0  # can't figure out for vexx
    counts['bmc'] = sum(
        1 for srv in servers if srv['Image Name'] == 'bmc-template')
    counts['ovb-node'] = sum(
        1 for srv in servers if srv['Image Name'] == 'ipxe-boot')
    counts['total'] = len(servers)
    counts['other'] = (counts['total'] - counts['ovb-node'] - counts['bmc']
                       - counts['undercloud'] - counts['multinode'])
    return counts
def run_quote_check():
    """Report absolute quota usage (cores, RAM, instances, gigabytes)."""
    limits = _run_cmd("openstack limits show --absolute --quote all -f json")[0]
    if not limits:
        return None

    def used(field):
        # First matching limit value, or 0 when the field is absent.
        return next(
            (entry['Value'] for entry in limits if field in entry['Name']), 0)

    return {
        'cores': used('totalCoresUsed'),
        'ram': used('totalRAMUsed'),
        'instances': used('totalInstancesUsed'),
        'gbs': used('totalGigabytesUsed'),
    }
def run_fips_count():
    """Number of allocated floating IPs (0 when the listing fails)."""
    fips = _run_cmd("openstack floating ip list -f json")[0]
    return len(fips) if fips else 0
def run_ports_down_count():
    """Number of ports currently in DOWN status (0 when listing fails)."""
    ports = _run_cmd("openstack port list -f json")[0]
    if not ports:
        return 0
    return sum(1 for port in ports if port['Status'] == "DOWN")
def run_stacks_check():
    """Count Heat stacks by status, plus stacks older than five hours."""
    stacks = _run_cmd("openstack stack list -f json")[0]
    if not stacks:
        return None

    def in_status(status):
        return sum(1 for stk in stacks if stk['Stack Status'] == status)

    def age_hours(stk):
        created = datetime.datetime.strptime(
            stk['Creation Time'], '%Y-%m-%dT%H:%M:%SZ')
        return int((datetime.datetime.now() - created).total_seconds() / 3600)

    return {
        'stacks_total': len(stacks),
        'create_complete': in_status('CREATE_COMPLETE'),
        'create_failed': in_status('CREATE_FAILED'),
        'create_in_progress': in_status('CREATE_IN_PROGRESS'),
        'delete_in_progress': in_status('DELETE_IN_PROGRESS'),
        'delete_failed': in_status('DELETE_FAILED'),
        'delete_complete': in_status('DELETE_COMPLETE'),
        'old_stacks': sum(1 for stk in stacks if age_hours(stk) > 5),
    }
def compose_influxdb_data(servers, quotes, stacks, fips, ports_down, ts):
    """Format the collected statistics as InfluxDB line protocol.

    Emits up to two measurement lines -- "vexxhost-servers" (server and/or
    stack counts) and "vexxhost-perf" (quota usage plus fips/ports_down) --
    each stamped with *ts* expanded to nanoseconds.  NOTE: mutates *quotes*
    in place by adding the 'fips' and 'ports_down' keys.

    (The final return statement was garbled by extraction split markers
    and has been reconstructed.)
    """
    s = ''
    influxdb_data = ''
    if servers:
        s = 'vexxhost-servers '
        s += ('ACTIVE={ACTIVE},BUILD={BUILD},ERROR={ERROR},DELETED={DELETED},'
              ).format(**servers)
        s += ('undercloud={undercloud},multinode={multinode},bmc={bmc},'
              'ovb-node={ovb-node},other={other},total={total}'
              ).format(**servers)
    if stacks:
        # Stack counts share the "vexxhost-servers" measurement line.
        if s:
            s += ','
        else:
            s = 'vexxhost-servers '
        s += (
            'stacks_total={stacks_total},create_complete={create_complete},'
            'create_failed={create_failed},'
            'create_in_progress={create_in_progress}'
            ',delete_in_progress={delete_in_progress},'
            'delete_failed={delete_failed},delete_complete={delete_complete},'
            'old_stacks={old_stacks}').format(**stacks)
    p = ''
    if quotes:
        quotes.update({'fips': fips})
        quotes.update({'ports_down': ports_down})
        p = 'vexxhost-perf '
        p += ('instances={instances},cores={cores},ram={ram},gigabytes={gbs},'
              'fips={fips},ports_down={ports_down}'
              ).format(**quotes)
    # Influx line protocol expects a nanosecond timestamp suffix.
    nanots = str(int(ts)) + "000000000"
    if s:
        influxdb_data = s + " %s\n" % nanots
    if p:
        influxdb_data += p + " %s\n" % nanots
    return influxdb_data
def write_influxdb_file(webdir, influxdb_data):
    """Publish the line-protocol payload as FILE_PATH under *webdir*."""
    with open(os.path.join(webdir, FILE_PATH), "w") as f:
        f.write(influxdb_data)
def main():
    """Collect all cloud statistics and publish them under --webdir."""
    parser = argparse.ArgumentParser(
        description="Retrieve cloud statistics")
    parser.add_argument(
        '--webdir', default="/var/www/html/", help="(default: %(default)s)")
    args = parser.parse_args()
    # Each helper shells out to the openstack CLI and may return None/0
    # on failure; compose_influxdb_data tolerates that.
    servers = run_server_check()
    quotes = run_quote_check()
    stacks = run_stacks_check()
    fips = run_fips_count()
    ports_down = run_ports_down_count()
    influxdb_data = compose_influxdb_data(
        servers, quotes, stacks, fips, ports_down, time.time())
    write_influxdb_file(args.webdir, influxdb_data)
# Script entry point (this file is run periodically; see header comment).
if __name__ == '__main__':
    main()
|
anupam-mitra/PySpikeSort | spikesort/cluster/__init__.py | Python | gpl-3.0 | 2,703 | 0.007769 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ${FILENAME}
#
# Copyright 2015 Anupam Mitra <anupam.mitra@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; with | out even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import numpy as np
import sklearn.metrics.cluster
from . import euclidean
from .. import features
from .. import signals
class SpikeFeatureClustering:
"""
Represents an instance of spike feature clustering process.
Parameters
----------
recording:
Recording from which spikes are detected and sorted.
spike_features:
Object containing the extracted and ranked spike features.
n_features:
Number of features to use for clustering.
cluster_algo:
Clustering algorithm to use for clustering. Currently only 'kMeans'
is supported
"""
def __init__(self, recording, spike_features, n_features, cluster_algo):
self.recording = recording
self.spike_features = spike_features
self.n_features = n_features
self.n_spike_classes = np.unique(self.recording.spike_class).shape[0]
self.n_spikes = self.recording.spike_class.shape[0]
self.cluster_algo = cluster_algo
def cluster_spike_features (self):
features = self.spike_features.get_top_features(self.n_features)
if self.cluster_algo.lower() == 'kmeans':
self.spike_class_est = euclidean.kmeans(features, self.n_spike_classes, feature_scaling=True)
# Not implemented at present, random assignment of class labels
else:
self.spike_class_est = np.random.choice(np.arange(0, self.n_spike_classes), self.n_spikes)
self.ami = sklearn.metrics.adjusted_mutual_info_score(\
self.recording.spike_class, self.spike_class_est)
self.ari = sklearn.metrics.cluster.adjusted_rand_score(\
self.recording.spike_class, self.spike_class_est)
__all__ = [\
"kmeans", \
"SpikeClustering",\
]
|
amadeusproject/amadeuslms | banco_questoes/serializers.py | Python | gpl-2.0 | 5,241 | 0.009593 | """
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
Este arquivo é parte do programa Amadeus Sistema de Gestão de Aprendizagem, ou simplesmente Amadeus LMS
O Amadeus LMS é um software livre; você pode redistribui-lo e/ou modifica-lo dentro dos termos da Licença Pública Geral GNU como publicada pela Fundação do Software Livre (FSF); na versão 2 da Licença.
Este programa é distribuído na esperança que possa ser útil, mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a Licença Pública Geral GNU para maiores detalhes.
    Você deve ter recebido uma cópia da Licença Pública Geral GNU, sob o título "LICENSE", junto com este programa, se não, escreva para a Fundação do Software Livre (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import os
import zipfile
import time
from django.db.models import Q
from django.conf import settings
from django.core.files import File
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from subjects.serializers import TagSerializer
from subjects.models import Tag, Subject
from .models import Question, Alternative
class AlternativeSerializer(serializers.ModelSerializer):
    """Serializer for question alternatives imported from a zip bundle."""

    alt_img = serializers.CharField(required = False, allow_blank = True, max_length = 255)

    def validate(self, data):
        """Resolve alt_img against the uploaded zip (self.context['files']).

        If the referenced image name collides with an existing file, it is
        extracted to a temp dir and renamed with a unique timestamped name;
        otherwise it is extracted in place.  Missing images become None.
        """
        files = self.context.get('files', None)
        if files:
            if data["alt_img"] in files.namelist():
                file_path = os.path.join(settings.MEDIA_ROOT, data["alt_img"])
                if os.path.isfile(file_path):
                    dst_path = os.path.join(settings.MEDIA_ROOT, "tmp")
                    path = files.extract(data["alt_img"], dst_path)
                    # BUGFIX: the extension must come from this alternative's
                    # own image ("alt_img"); the original read the
                    # nonexistent "question_img" key, raising KeyError
                    # whenever a name collision occurred.
                    new_name = os.path.join("questions", os.path.join("alternatives", "alternative_" + str(time.time()) + os.path.splitext(data["alt_img"])[1]))
                    os.rename(os.path.join(dst_path, path), os.path.join(settings.MEDIA_ROOT, new_name))
                    data["alt_img"] = new_name
                else:
                    path = files.extract(data["alt_img"], settings.MEDIA_ROOT)
            else:
                data["alt_img"] = None
        else:
            data["alt_img"] = None
        return data

    class Meta:
        model = Alternative
        exclude = ('question',)
class QuestionDatabaseSerializer(serializers.ModelSerializer):
    """Serializer that imports questions (with tags and alternatives) from a zip bundle."""
    categories = TagSerializer(many = True)
    # NOTE(review): 'get_files' is passed as the serializer's first positional
    # argument (normally `instance`) — looks suspicious; confirm intent.
    alt_question = AlternativeSerializer('get_files', many = True)
    question_img = serializers.CharField(required = False, allow_blank = True, max_length = 255)
    def get_subject(self, obj):
        # Subject is supplied by the view through the serializer context.
        subject = self.context.get("subject", None)
        return subject
    def get_files(self, obj):
        # Zip archive of bundled images, supplied through the context.
        files = self.context.get("files", None)
        return files
    def validate(self, data):
        """Resolve question_img against the uploaded zip, extracting it to MEDIA_ROOT.

        On a filename collision the image is extracted to a temp dir and
        renamed with a unique timestamped name; missing images become None.
        """
        files = self.context.get('files', None)
        if files:
            if data["question_img"] in files.namelist():
                file_path = os.path.join(settings.MEDIA_ROOT, data["question_img"])
                if os.path.isfile(file_path):
                    dst_path = os.path.join(settings.MEDIA_ROOT, "tmp")
                    path = files.extract(data["question_img"], dst_path)
                    new_name = os.path.join("questions","question_" + str(time.time()) + os.path.splitext(data["question_img"])[1])
                    os.rename(os.path.join(dst_path, path), os.path.join(settings.MEDIA_ROOT, new_name))
                    data["question_img"] = new_name
                else:
                    path = files.extract(data["question_img"], settings.MEDIA_ROOT)
            else:
                data["question_img"] = None
        else:
            data["question_img"] = None
        return data
    class Meta:
        model = Question
        exclude = ('subject', )
    def create(self, data):
        """Create the question with its tags and alternatives.

        Skips creation entirely (returning None) when a question with the
        same statement already exists for the subject.
        """
        question_data = data
        subject = self.context.get("subject", None)
        alternatives = question_data["alt_question"]
        del question_data["alt_question"]
        question = None
        if not Question.objects.filter(enunciado=question_data["enunciado"], subject=subject).exists():
            question = Question()
            question.enunciado = question_data["enunciado"]
            question.question_img = question_data["question_img"]
            question.subject = subject
            question.save()
            tags = data["categories"]
            for tag in tags:
                if not tag["name"] == "":
                    # Reuse an existing tag by id or name, else create it.
                    if tag["id"] == "":
                        if Tag.objects.filter(name = tag["name"]).exists():
                            tag = get_object_or_404(Tag, name = tag["name"])
                        else:
                            tag = Tag.objects.create(name = tag["name"])
                    else:
                        tag = get_object_or_404(Tag, id = tag["id"])
                    question.categories.add(tag)
            for alt in alternatives:
                Alternative.objects.create(question = question, **alt)
        return question
y-sira/atcoder | abc002/a.py | Python | mit | 51 | 0 | x, y = map(int, inpu | t().split())
print(ma | x(x, y))
|
qtumproject/qtum | contrib/linearize/linearize-data.py | Python | mit | 13,632 | 0.003301 | #!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
import glob
from collections import namedtuple
from binascii import unhexlify
settings = {}  # module-level config; presumably filled by argument/config parsing later in the file -- TODO confirm
def hex_switchEndian(s):
    """Reverse the byte order of a hex string (two hex chars per byte)."""
    chunks = [s[i:i + 2].encode() for i in range(0, len(s), 2)]
    chunks.reverse()
    return b''.join(chunks).decode()
def uint32(x):
    """Truncate an integer to its unsigned low 32 bits."""
    return x % 0x100000000
def bytereverse(x):
    """Swap the byte order of a 32-bit word (0xAABBCCDD -> 0xDDCCBBAA)."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Reverse the byte order inside each 32-bit word of the buffer."""
    out = []
    for off in range(0, len(in_buf), 4):
        # Reading little-endian and writing big-endian flips the 4 bytes;
        # short trailing chunks raise struct.error, as before.
        (word,) = struct.unpack('<I', in_buf[off:off + 4])
        out.append(struct.pack('>I', word))
    return b''.join(out)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words (bytes within words unchanged)."""
    words = [in_buf[off:off + 4] for off in range(0, len(in_buf), 4)]
    return b''.join(reversed(words))
def calc_hdr_hash(blk_hdr):
    """Bitcoin-style double SHA-256 of the raw block header."""
    first = hashlib.sha256(blk_hdr).digest()
    return hashlib.sha256(first).digest()
def calc_hash_str(blk_hdr):
    """Double-SHA256 the header, then reorder bytes and words into the
    conventional display form and return it as a hex string."""
    digest = calc_hdr_hash(blk_hdr)
    return wordreverse(bufreverse(digest)).hex()
def get_blk_dt(blk_hdr):
    """Return (first-of-month datetime, raw nTime) from a block header.

    nTime lives at byte offset 68 of the header, little-endian.
    """
    (nTime,) = struct.unpack("<I", blk_hdr[68:68 + 4])
    stamp = datetime.datetime.fromtimestamp(nTime)
    # Truncated to month start; used for monthly output-file splitting.
    return (datetime.datetime(stamp.year, stamp.month, 1), nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
    """Read the newline-separated hash list named by settings['hashlist'].

    When settings['rev_hash_bytes'] == 'true', each hash is byte-reversed
    via hex_switchEndian so downstream code sees one canonical byte order.
    """
    blkindex = []
    # BUGFIX: the file handle was opened and never closed; use a context
    # manager so it is released deterministically.
    with open(settings['hashlist'], "r", encoding="utf8") as f:
        for line in f:
            line = line.rstrip()
            if settings['rev_hash_bytes'] == 'true':
                line = hex_switchEndian(line)
            blkindex.append(line)
    print("Read " + str(len(blkindex)) + " hashes")
    return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
    """Map each block hash to its height (its index in the hash list)."""
    return {blk_hash: height for height, blk_hash in enumerate(blkindex)}
# This gets the first block file ID that exists from the input block
# file directory.
def getFirstBlockFileId(block_dir_path):
    """Return the numeric ID of the lowest-numbered blkNNNNN.dat file.

    Pruned nodes may start above 0; an empty directory yields 0.
    """
    pattern = os.path.join(block_dir_path, "blk[0-9][0-9][0-9][0-9][0-9].dat")
    matches = glob.glob(pattern)
    if not matches:
        print("blocks not pruned - starting at 0")
        return 0
    # Lexicographic minimum equals numeric minimum: the width is fixed.
    first_name = os.path.basename(min(matches))
    # 'blkNNNNN.dat' -> the five digits sit at positions 3..7.
    return int(first_name[3:8])
# Block header and extent on disk
# fn: source blk file id; offset/size: byte span of the block in that file;
# inhdr: the 8-byte prefix read from the file; blkhdr: raw block header bytes.
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
    def __init__(self, settings, blkindex, blkmap):
        """Set up copy state from settings, the hash list and the hash->height map."""
        self.settings = settings
        self.blkindex = blkindex
        self.blkmap = blkmap
        # Get first occurring block file id - for pruned nodes this
        # will not necessarily be 0
        self.inFn = getFirstBlockFileId(self.settings['input'])
        self.inF = None            # current input file handle
        self.outFn = 0             # current output file number
        self.outsz = 0             # bytes written to the current output file
        self.outF = None           # current output file handle
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0
        self.lastDate = datetime.datetime(2000, 1, 1)
        # Seed for the newest-seen block timestamp (used for utime);
        # origin of the constant is unclear -- TODO confirm.
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
self.lastDate = blkDate
if self.outF:
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
self.outFname = self.settings['output_file']
else:
self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + self.outFname)
self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
| self.outF.write(rawblock)
self.outsz = self.outsz + len(inh | dr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        # pop: each extent is consumed exactly once, keyed by output height.
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else: # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)
        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
|
skewerr/deskbot | modules/commands/tell.py | Python | bsd-3-clause | 4,602 | 0.026728 | import re
from .. import irc, var, ini
from ..tools import is_identified
# Require identification with NickServ to send messages.
def ident (f):
def check (user, channel, word):
if is_identified(user):
f(user, channel, word)
else:
irc.msg(channel, "{}: Identify with NickServ first.".format(user))
return check
# Insert a message monitor to look for user activity.
def ins_monitor (line_obj):
if line_obj.event in ["JOIN", "PRIVMSG"]:
send_messages(line_obj.user)
# Fill commands dictionary.
def ins_command ():
var.commands["tell"] = type("command", (object,), {})()
var.commands["tell"].method = leave_message
var.commands["tell"].aliases = [".tell", ".msg"]
var.commands["tell"].usage = ["{} user message - Leave a message to user."]
var.commands["listtell"] = type("command", (object,), {})()
var.commands["listtell"].method = list_messages
var.commands["listtell"].aliases = [".listtell", ".ltell", ".listtells", ".showtells"]
var.commands["listtell"].usage = ["{} - Check if you have any messages and show them."]
# Fill a space for the messages database.
def ins_db ():
var.data["messages"] = ini.fill_dict("messages.ini", "Messages")
# Turning list of strings into a list of tuples.
for user in var.data["messages"]:
msg_list = [(msg.split(" ~ ")[0], msg.split(" ~ ", 1)[1]) for msg in var.data["messages"][user]]
var.data["messages"][user] = msg_list
# Leave a message to someone.
def leave_message (user, channel, word):
# It needs a nickname and a message.
if len(word) < 3:
irc.msg(channel, "{}: Wrong syntax. Check .help".format(user))
return
target = word[1]
message = " ".join(word[2:])
# Check if target is a valid nickname.
match = re.match("[a-zA-Z\[\]\\`_\^\{\|\}][a-zA-Z0-9\[\]\\`_\^\{\|\}]+", target)
if not match or (hasattr(match, "group") and match.group() != target):
irc.msg(channel, "{} is not a valid nickname.".format(target))
return
# Check for "hurr Imma tell myself something".
if target.lower() == user.lower():
irc.msg(channel, "{}: Do it yourself. I'm not .tell'ing you shit!".format(user))
return
# The bot won't tell itself something.
if target.lower() == irc.botnick.lower():
irc.msg(channel, "{}: I'm right here, say it to my face!".format(user))
return
# Check for repeated messages.
if target in var.data["messages"]:
if (user, message) in var.data["messages"][target]:
irc.msg(channel, "{}: You already left this message.".format(user))
return
# Create an empty list for users not in the database.
if target not in var.data["messages"]:
var.data["messages"][target] = []
# Append tuple and add to ini.
var.data["messages"][target].append((user, message))
message_list = ["{} ~ {}".format(pa | ir[0], pair[1]) for pair in var.data["messages"][target]]
ini.add_to_ini("Messages", target, "\n".join(message_list), "messages.ini")
irc.msg(channel, "{}: Message stored.".format(user))
# Send a user stored | messages.
def send_messages (user):
# Be case insensitive, please.
for nick in var.data["messages"]:
if user.lower() == nick.lower():
user = nick
# There's no use going on if the user isn't in the messages database.
if user not in var.data["messages"]:
return
if len(var.data["messages"][user]) > 4:
# Send the first 4 messages.
for pair in var.data["messages"][user][0:4]:
irc.msg(user, "{} sent you: {}".format(pair[0], pair[1]))
irc.msg(pair[0], "{} received your message.".format(user))
# Remove the sent messages.
st_messages = var.data["messages"][user][0:4]
for pair in st_messages:
var.data["messages"][user].remove(pair)
new_messages = ["{} ~ {}".format(pair[0], pair[1]) for pair in var.data["messages"][user]]
ini.add_to_ini("Messages", user, "\n".join(new_messages), "messages.ini")
irc.msg(user, "To reply to them, use .tell user message")
irc.msg(user, "You have more messages. Type \x034.showtells\x0f to view them.")
else:
# Send every message.
for pair in var.data["messages"][user]:
irc.msg(user, "{} sent you: {}".format(pair[0], pair[1]))
irc.msg(pair[0], "{} received your message.".format(user))
# Remove them.
del var.data["messages"][user]
ini.remove_from_ini("Messages", user, "messages.ini")
irc.msg(user, "To reply to them, use .tell user message")
# Send the rest of the messages.
def list_messages (user, channel, word):
# There's no use going on if the user isn't in the messages database.
if user not in var.data["messages"]:
irc.msg(channel, "{}: You don't have any messages.".format(user))
return
send_messages(user)
irc.msg(channel, "{}: Sent ;)".format(user))
|
ywangd/stash | bin/gh.py | Python | mit | 6,911 | 0.011431 | # coding: utf-8
'''
Usage: gh <command> [<args>...]
gh <command> (-h|--help)
supported commands are:
gh fork <repo> forks user/repo
gh create <repo> creates a new repo
gh pull <repo> <base> <head> create a pull request
gh list_keys list user keys
gh create_key <title> [<public_key_path>] add a key to github (or create new key if none exist)
For all commands, use gh <command> --help for more detailed help
NOTE: assumes a keychain user/pass stored in keychainservice='stash.git.github.com', which is also the default from the git module.
'''
from __future__ import print_function
import os
import sys
from functools import wraps
from six.moves import input
_stash = globals()["_stash"]
try:
import github
except ImportError:
print("Could not import 'github', installing it...")
_stash("pip install pygithub")
import github
try:
import docopt
except ImportError:
print("Could not import 'docopt', installing it...")
_stash("pip install docopt")
from docopt import docopt
from github import Github
import keychain, console, inspect
class GitHubRepoNotFoundError(Exception):
pass
def command(func):
@wraps(func)
def tmp(argv):
if len(argv) == 1:
if func.__name__ not in ['gh_list_keys']:
argv.append('--help')
try:
args = docopt(func.__doc__, argv=argv)
return func(args)
except SystemExit as e:
print(e)
return tmp
@command
def gh_fork(args):
'''Usage: gh fork <repo>
Fork a repo to your own github account.
<repo> - repo name of form user/repo
'''
console.show_activity()
g, user = setup_gh()
try:
other_repo = g.get_repo(args['<repo>'])
if other_repo:
mine = user.create_fork(other_repo)
print('fork created: {}/{}'.format(mine.owner.login, mine.name))
else:
pass
finally:
console.hide_activity()
@command
def gh_create(args):
'''Usage: gh create [options] <name>
Options:
-h, --help This message
-s <desc>, --description <desc> Repo description
-h <url>, --homepage <url> Homepage url
-p, --private private
-i, --has_issues has issues
-w, --has_wiki has wiki
-d, --has_downloads has downloads
-a, --auto_init create readme and first commit
-g <ign>, --gitignore_template <ign> create gitignore using string
'''
kwargs = {key[2:]: value for key, value in args.items() if key.startswith('--') and value}
console.show_activity()
try:
g, user = setup_gh()
r = user.create_repo(args['<name>'], **kwargs)
print('Created %s' % r.html_url)
finally:
console.hide_activity()
def parse_branch(userinput):
if ':' in userinput:
owner, branch = userinput.split(':')
else:
owner = userinput
branch = 'master'
return owner, branch
def parent_owner(user, reponame):
return user.get_repo(reponame).parent.owner.login
@command
def gh_pull(args):
'''Usage:
gh pull <reponame> <base> [<head>]
gh pull <reponame> <base> [<head>] --title <title> [--body <body>]
gh pull <reponame> <base> [<head>] -i <issue>
Options:
-h, --help This message
-t <title>, --title <title> Title of pull request
-b <body>, --body <body> Body of pull request [default: ]
-i <issue>, --issue <issue> Issue number
Examples:
gh pull stash ywangd jsbain
gh pull stash ywangd:dev jsbain:dev
gh pull stash :dev :master
base and head should be in the format owner:branch.
if base owner is omitted, owner of parent repo is used.
if head owner is omitted, user is used
'''
console.show_activity()
try:
g, user = setup_gh()
reponame = args['<reponame>']
baseowner, basebranch = parse_branch(args['<base>'])
if not baseowner:
baseowner = parent_owner(reponame)
if not args['<head>']:
args['<head>'] = ':'
headowner, headbranch = parse_branch(args['<head>'])
if not headowner:
headowner = user.login
baserepo = g.get_user(baseowner).get_repo(reponame)
kwargs = {}
if args['--issue']:
kwargs['issue'] = baserepo.get_issue(args['--issue'])
elif not args['--title']:
kwargs['title'] = input('Enter pull title:')
kwargs['body'] = input('Enter pull body:')
else:
kwargs['title'] = args['--title']
kwargs['body'] = args['--body'] or ''
kwargs['base'] = basebranch
kwargs['head'] | = ':'.join([headowner, headbranch])
pullreq = baserepo.create_pull(**kwargs)
print('Created pull %s' % pullreq.html_url)
print('Commits:')
print([(x.sha, x.commit.message) for x in pullreq.get_commits()])
print('Changed Files:')
print([x.filename for x in pullreq.get_files()])
finally:
console.hide_activity()
print('success')
|
@command
def gh_list_keys(args):
'''Usage:
gh list_keys [options]
Options:
-h, --help This message
List keys
'''
g, u = setup_gh()
for key in u.get_keys():
print('{}:\n {}\n'.format(key.title, key.key))
@command
def gh_create_key(args):
'''Usage:
gh create_key <title> [<public_key_path>]
Options:
-h, --help This message
Examples:
gh create_key ipad ~/.ssh/id_rsa.pub
gh create_key ipad (checks for ~/.ssh/id_rsa.pub, or creates new key if needed using ssh-keygen )
'''
title = args['<title>']
default_keyfile = os.path.expanduser('~/.ssh/id_rsa.pub')
if not args['<public_key_path>']:
if not os.path.exists(default_keyfile):
print('Creating a ssh key in ~/.ssh/')
cmd_string = '''
echo ssh-keygen -d rsa -b2048
ssh-keygen -trsa -b2048
'''
_stash(cmd_string)
args['<public_key_path>'] = default_keyfile
#if private key, use pub key
if not args['<public_key_path>'].endswith('.pub'):
args['<public_key_path>'] += '.pub'
if not os.path.exists(args['<public_key_path>']):
raise Exception('Public Key file not found!')
g, u = setup_gh()
with open(args['<public_key_path>']) as pubkey:
u.create_key(title, pubkey.read())
def setup_gh():
keychainservice = 'stash.git.github.com'
user = dict(keychain.get_services())[keychainservice]
pw = keychain.get_password(keychainservice, user)
g = Github(user, pw)
u = g.get_user()
return g, u
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
sys.argv.append('--help')
args = docopt(__doc__, version='0.1', options_first=True)
cmd = args['<command>']
argv = [cmd] + args['<args>']
try:
func = locals()['gh_%s' % cmd]
except KeyError:
print('No such cmd')
print(__doc__)
raise
func(argv)
|
ksetyadi/Sahana-Eden | controllers/org.py | Python | mit | 9,210 | 0.008686 | # -*- coding: utf-8 -*-
""" Organisation Registry - Controllers
@author: Fran Boon
@author: Michael Howden
"""
prefix = request.controller
resourcename = request.function
if prefix not in deployment_settings.modules:
session.error = T("Module disabled!")
redirect(URL(r=request, c="default", f="index"))
# Options Menu (available in all Functions" Views)
response.menu_options = org_menu
#==============================================================================
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[prefix].name_nice
return dict(module_name=module_name)
#==============================================================================
def cluster():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
return s3_rest_controller(prefix, resourcename)
#==============================================================================
def cluster_subsector():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
return s3_rest_controller(prefix, resourcename)
#==============================================================================
def organisation():
""" RESTful CRUD controller """
# Post-processor
def postp(r, output):
# No point in downloading large dropdowns which we hide, so provide a smaller represent
if r.component and r.component_name in ["office", "project", "store", "assess", "activity"]:
db[r.component.tablename].location_id.requires = IS_NULL_OR(IS_ONE_OF_EMPTY(db, "gis_location.id"))
response.s3.gis.location_id = r.component.tablename + "_location_id"
return output
response.s3.postp = postp
rheader = lambda r: shn_org_rheader(r,
tabs = [(T("Basic Details"), None),
(T("Staff"), "staff"),
(T("Offices"), "office"),
(T("Warehouses"), "store"),
(T("Assessments"), "assess"),
(T("Projects"), "project"),
(T("Activities"), "activity"),
#(T("Tasks"), "task"),
#(T("Donors"), "organisation"),
#(T("Sites"), "site"), # Ticket 195
])
output = s3_rest_controller(prefix, resourcename, rheader=rheader)
return output
#==============================================================================
def office():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
if isinstance(request.vars.organisation_id, list):
request.vars.organisation_id = request.vars.organisation_id[0]
# Pre-processor
def prep(r):
# No point in downloading large dropdowns which we hide, so provide a smaller represent
# the update forms are not ready. when they will - uncomment this and comment the next one
#if r.method in ("create", "update"):
if r.method == "create":
table.organisation_id.requires = IS_NULL_OR(IS_ONE_OF_EMPTY(db, "org_organisation.id"))
if request.vars.organisation_id and \
request.vars.organisation_id != "None":
session.s3.organisation_id = request.vars.organisation_id
# Organisation name should be displayed on the form if organisation_id is pre-selected
orgs = db.org_organisation
query = orgs.id == int(session.s3.organisation_id)
session.s3.organisation_name = db(query).select(orgs.name, limitby=(0, 1)).first().name
return True
response.s3.prep = prep
rheader = lambda r: shn_org_rheader(r,
tabs = [(T("Basic Details"), None),
(T("Contact Data"), "pe_contact"),
(T("Staff"), "staff"),
])
return s3_rest_controller(prefix, resourcename, rheader=rheader)
#==============================================================================
def staff():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
# Pre-processor
def prep(r):
# No point in downloading large dropdowns which we hide, so provide a smaller represent
# the update forms are not ready. when | they will - uncomment this and comment the next one
#if r.method in ("create", "update"):
if r.method == "create":
# person_id mandatory for a staff!
table.person_id.requires = IS_ONE_OF_EMPTY | (db, "pr_person.id")
table.organisation_id.requires = IS_NULL_OR(IS_ONE_OF_EMPTY(db, "org_organisation.id"))
table.office_id.requires = IS_NULL_OR(IS_ONE_OF_EMPTY(db, "org_office.id"))
return True
response.s3.prep = prep
return s3_rest_controller(prefix, resourcename)
#==============================================================================
def donor():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
s3xrc.model.configure(table, listadd=False)
output = s3_rest_controller(prefix, resourcename)
return output
#==============================================================================
# Component Resources need these settings to be visible where they are linked from
# - so we put them outside their controller function
tablename = "%s_%s" % (prefix, "donor")
s3.crud_strings[tablename] = Storage(
title_create = ADD_DONOR,
title_display = T("Donor Details"),
title_list = T("Donors Report"),
title_update = T("Edit Donor"),
title_search = T("Search Donors"),
subtitle_create = T("Add New Donor"),
subtitle_list = T("Donors"),
label_list_button = T("List Donors"),
label_create_button = ADD_DONOR,
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered"))
#==============================================================================
def shn_org_rheader(r, tabs=[]):
""" Organisation Registry page headers """
if r.representation == "html":
rheader_tabs = shn_rheader_tabs(r, tabs)
if r.name == "organisation":
#_next = r.here()
#_same = r.same()
organisation = r.record
if organisation.cluster_id:
_sectors = shn_sector_represent(organisation.cluster_id)
else:
_sectors = None
try:
_type = org_organisation_type_opts[organisation.type]
except KeyError:
_type = None
rheader = DIV(TABLE(
TR(
TH(T("Organization") + ": "),
organisation.name,
TH(T("Cluster(s)") + ": "),
_sectors
),
TR(
#TH(A(T("Edit Organization"),
# _href=URL(r=request, c="org", f="organisation", args=[r.id, "update"], vars={"_next": _next})))
TH(T("Type") + ": "),
_type,
)
), rheader_tabs)
return rheader
elif r.name == "office":
#_next = r.here()
#_same = r.same()
office = r.record
organisation = db(db.org_organisation.id == office.organisation_id).select(db.org_organisation.name, limitby=(0, 1)).first()
if organi |
avatartwo/avatar2 | avatar2/targets/jlink_target.py | Python | apache-2.0 | 1,183 | 0.003381 | import sys
from avatar2.targets import Target, TargetStates
from avatar2.protocols.jlink import JLinkProtocol
from avatar2.watchmen import watch
if sys.version_info < (3, 0):
from Queue import PriorityQueue
else:
from queue import PriorityQueue
class JLinkTarget(Target | ):
def __init__(self, avatar, serial, device, **kwargs):
"""
Create a JLink target instance
:param avatar: The avatar instance
:param serial: The JLink's serial number
:param device: The Device string to use (e.g., ARM7, see JlinkExe for the list)
:param kwargs:
"""
super(JLinkTarget, self).__init__(avatar, **kwargs | )
self.avatar = avatar
self.serial = serial
self.device = device
@watch("TargetInit")
def init(self):
jlink = JLinkProtocol(serial=self.serial, device=self.device, avatar=self.avatar, origin=self)
self.protocols.set_all(jlink)
if jlink.jlink.halted():
self.state = TargetStates.STOPPED
else:
self.state = TargetStates.RUNNING
#self.wait()
def reset(self, halt=True):
self.protocols.execution.reset(halt=halt) |
MaxMorgenstern/EmeraldAI | EmeraldAI/Pipelines/ResponseProcessing/__init__.py | Python | apache-2.0 | 30 | 0 | __al | l__ = ["ProcessRe | sponse"]
|
pouyaAB/ros_teleoperate | CPR_mover/setup.py | Python | mit | 391 | 0.002558 | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
pa | ckages=['cpr_mover_controller'],
package_dir={'': 'scripts'},
requires=['std_msgs', 'rospy', 'geomet | ry_msgs', 'sensor_msgs']
)
setup(**setup_args)
|
MostlyOpen/odoo_addons | myo_event/models/annotation.py | Python | agpl-3.0 | 1,400 | 0 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public L | icense as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the im | plied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Event(models.Model):
_inherit = 'myo.event'
annotation_ids = fields.Many2many(
'myo.annotation',
'myo_event_annotation_rel',
'event_id',
'annotation_id',
'Annotations'
)
class Annotation(models.Model):
_inherit = 'myo.annotation'
event_ids = fields.Many2many(
'myo.event',
'myo_event_annotation_rel',
'annotation_id',
'event_id',
'Events'
)
|
Auzzy/pyinq | examples/suite_class_tests.py | Python | isc | 407 | 0.066339 | from pyinq.tags import *
@testClass(suite="suite1")
class Class1(object):
@test
def test1 | ():
assert True
@test
def test2():
assert True
@testClass(suite="suite2")
class Class2(object):
@test(suite="suite1")
def test3():
assert True
@test(suite="suite2")
def test4():
assert True
@testClass
class Class3(object):
@test
def test5():
assert True
@test
def test6():
assert True | |
TalentedComponent/SDC | click_and_crop.py | Python | gpl-3.0 | 1,905 | 0.032021 | # import the necessary packages
import argparse
import cv2
# initialize the list of reference points and boolean indicating
# whether croppi | ng is being performed or not
refPt = []
cropping = | False
def click_and_crop(event, x, y, flags, param):
# grab references to the global variables
global refPt, cropping
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that cropping is being
# performed
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
cropping = True
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates and indicate that
# the cropping operation is finished
refPt.append((x, y))
cropping = False
# draw a rectangle around the region of interest
cv2.rectangle(image, refPt[0], refPt[1], (255, 255, 0), 2)
cv2.imshow("image", image)
# construct the argument parser and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required=True, help="Path to the image")
#args = vars(ap.parse_args())
# load the image, clone it, and setup the mouse callback function
image = cv2.imread('car.jpeg')
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_crop)
# keep looping until the 'q' key is pressed
while True:
# display the image and wait for a keypress
cv2.imshow("image", image)
key = cv2.waitKey(1) & 0xFF
# if the 'r' key is pressed, reset the cropping region
if key == ord("r"):
image = clone.copy()
# if the 'c' key is pressed, break from the loop
elif key == ord("c"):
break
# if there are two reference points, then crop the region of interest
# from teh image and display it
if len(refPt) == 2:
roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
cv2.imshow("ROI", roi)
cv2.waitKey(0)
# close all open windows
cv2.destroyAllWindows()
|
tuomasjjrasanen/t2jrbot | lib/plugins/command.py | Python | gpl-3.0 | 4,817 | 0.001246 | # -*- coding: utf-8 -*-
# Command plugin for t2jrbot.
# Copyright © 2014 Tuomas Räsänen <tuomasjjrasanen@tjjr.fi>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import t2jrbot.conf
class _CommandPlugin(object):
def __init__(self, bot):
self.__bot = bot
self.__command_handlers = {}
self.__command_descriptions = {}
self.__pre_eval_hooks = {}
self.__bot.add_irc_callback(self.__irc_privmsg, command="PRIVMSG")
self.register_command("!help", self.__command_help,
"Since you got this far, "
"you already know what this command does.")
def release(self):
pass
def __command_help(self, nick, host, channel, this_command, argstr):
command = argstr.strip()
if not command:
commands = sorted(self.__command_descriptions.keys())
self.__bot.irc.send_privmsg(channel,
"%s: Commands: %s"
% (nick, ", ".join(commands)))
self.__bot.irc.send_privmsg(channel,
"%s: To get detailed help on a command, "
"use %s COMMAND, e.g. %s %s"
% (nick, this_command, this_command, this_command))
else:
try:
descr = self.__command_descriptions[command]
except KeyError:
self.__bot.irc.send_privmsg(channel,
"%s: command '%s' not found" % (nick, command))
else:
self.__bot.irc.send_privmsg(channel, "%s: %s - %s"
% (nick, command, descr))
def add_pre_eval_hook(self, hook, command=None):
hooks = self.__pre_eval_hooks.setdefault(command, set())
hooks.add(hook)
def register_command(self, command, handler, description=""):
if command in self.__command_handlers:
raise Error("command '%s' is already registered" % command)
self.__command_handlers[command] = handler
self.__command_descriptions[command] = description
def unregister_command(self, command):
try:
| del self.__command_handlers[command]
except KeyError:
raise Error("command '%s' is not registered" % command)
del self.__command_descriptions[command]
def __irc_privmsg(self | , prefix, this_command, params):
nick, sep, host = prefix.partition("!")
target, text = params
if target == self.__bot.nick:
# User-private messages are not supported and are silently
# ignored.
return
channel = target
# Ignore all leading whitespaces.
text = text.lstrip()
if not text.startswith("%s:" % self.__bot.nick):
# The message is not designated to me, ignore.
return
# Strip my nick from the beginning of the text.
commandstr = text[len("%s:" % self.__bot.nick):].lstrip()
command, _, argstr = commandstr.partition(' ')
self.__eval_command(nick, host, channel, command, argstr)
def __eval_command(self, nick, host, channel, command, argstr):
hooks = set()
hooks.update(self.__pre_eval_hooks.get(command, set()),
self.__pre_eval_hooks.get(None, set()))
if not all([hook(nick, host, channel, command, argstr) for hook in hooks]):
return
try:
command_handler = self.__command_handlers[command]
except KeyError:
# Silently ignore all input except registered commands.
return
try:
command_handler(nick, host, channel, command, argstr)
except Exception, e:
self.__bot.irc.send_privmsg(channel,
"%s: error: %s" % (nick, e.message))
def check_conf(conf):
t2jrbot.conf.check_keys(conf, ())
def load(bot, conf):
check_conf(conf)
return _CommandPlugin(bot)
|
andrewyoung1991/abjad | abjad/tools/scoretools/__init__.py | Python | gpl-3.0 | 278 | 0.014388 | # -*- | encoding: utf-8 -*-
'''Dependencies:
The ``scoretools`` package should not import ``instrumenttools``
at top level.
'''
fr | om abjad.tools import systemtools
systemtools.ImportManager.import_structured_package(
__path__[0],
globals(),
)
_documentation_section = 'core' |
yfilali/graphql-pynamodb | examples/flask_pynamodb/app.py | Python | mit | 552 | 0.001812 | from database import init_db
from flask import Flask
from flask_graphql import GraphQLView
from schema import schema
app = Flask(__name__)
app.debug = True
|
default_query = '''
{
allEmployees {
edges {
node {
id,
name,
department {
id,
name
} | ,
role {
id,
name
}
}
}
}
}'''.strip()
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
if __name__ == '__main__':
init_db()
app.run()
|
alexpilotti/python-glanceclient | glanceclient/common/progressbar.py | Python | apache-2.0 | 3,171 | 0 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
class _ProgressBarBase(object):
"""
Base abstract class used by specific class wrapper to show a progress bar
when the wrapped object are consumed.
:param wrapped: Object to wrap that hold data to be consumed.
:param totalsize: The total size of the data in the wrapped object.
:note: The progress will be displayed only if sys.stdout is a tty.
"""
def __init__(self, wrapped, totalsize):
self._wrapped = wrapped
self._totalsize = float(totalsize)
self._show_progress = sys.stdout.isatty() and self._totalsize != 0
self._percent = 0
def _display_progress_bar(self, size_read):
if self._show_progress:
self._percent += size_read / self._totalsize
# Output something like this: [==========> ] 49%
sys.stdout.write('\r[{0:<30}] {1:.0%}'.format(
'=' * int(round(self._percent * 29)) + '>', self._percent
))
sys.stdout.flush()
def __getattr__(self, attr):
# Forward other attribute access to the wrapped object.
return getattr(self._wrapped, attr)
class VerboseFileWrapper(_ProgressBarBase):
"""
A file wrapper that show and advance a progress bar whenever file's read
method is called.
"""
def read(self, *args, **kwargs):
data = self._wrapped.read(*args, **kwargs)
if data:
self._display_progress_bar(len(data))
else:
if self._show_progress:
# Break to a new line from the progress bar for incoming
# output.
sys.stdout.write('\n')
return data
class VerboseIteratorWrapper(_ProgressBarBase):
"""
An iterator wrapper that show and advan | ce a progress bar whenever
data is consumed from the iterator.
:note: Use only with iterator that yield strings.
"""
def __iter__(self):
return self
def next(self):
try:
data = six.next(self._wrapped)
# NOTE(mouad): Assuming that data is a string b/c otherwise calling
| # len function will not make any sense.
self._display_progress_bar(len(data))
return data
except StopIteration:
if self._show_progress:
# Break to a new line from the progress bar for incoming
# output.
sys.stdout.write('\n')
raise
# In Python 3, __next__() has replaced next().
__next__ = next
|
jailuthra/onetimepad | onetimepad.py | Python | mit | 2,562 | 0.005855 | #!/usr/bin/env python3
# Copyright (C) 2013-2014 Jai Luthra <me@jailuthra.in>
# See LICENSE file for more details
'''En | crypt or Decrypt data using One-Time Pad'''
import binascii, argparse, itertools
def main():
parser = argparse.ArgumentParser(
description='Encrypt or Decrypt data using One-Time Pad')
parser.add_argument('-d', '--decrypt', action='store_true',
help='Decrypt data (default is to encrypt)')
parser.add_argument('-f', '--file', dest='filename',
help='File name to encrypt/decrypt')
args = parser.parse_args()
fname = | args.filename
# Decrypt Mode
if args.decrypt:
if fname == None:
cipher = input('Cipher: ')
key = input('Key: ')
print('Message:', decrypt(cipher, key))
else: # Read from a file
with open(fname, 'r') as cryptfile:
key = input('Key: ')
cipher = cryptfile.read()
print(decrypt(cipher, key))
# Encrypt Mode (default)
else:
if fname == None:
msg = input('Message: ')
key = input('Key: ')
if len(key) < len(msg):
print('\nWARNING: Key size less than the message is unsafe')
print('Cipher:', encrypt(msg, key))
else: # Write to a file
with open(fname, 'r') as f:
key = input('Key: ')
msg = f.read()
if len(key) < len(msg):
print('\nWARNING: Key size less than the message is unsafe')
cryptfile = open(fname + '.otpp', 'w')
cryptfile.write(encrypt(msg, key))
cryptfile.close()
print('Encrypted data has been written to', fname+'.otpp')
def encrypt(msg, key):
'''Return cipher text'''
cipher = xor_str(msg, key)
# ascii armor the cipher text
cipher = (binascii.hexlify(cipher.encode())).decode()
return cipher
def decrypt(cipher, key):
'''Return plain text message'''
# get back the string from ascii armored cipher
cipher = (binascii.unhexlify(cipher.encode())).decode()
msg = xor_str(cipher, key)
return msg
def xor_str(a, b):
'''Return the xor of the two strings a and b
The length of the output string is the same as that of first string,
which means that if second string is shorter than first, it'll be repeated
over.'''
xorred = ''.join([chr(ord(x)^ord(y)) for x, y in zip(a, itertools.cycle(b))])
return xorred
if __name__ == "__main__":
main()
|
RDFLib/rdflib | rdflib/graph.py | Python | bsd-3-clause | 84,117 | 0.001034 | from typing import (
IO,
Any,
BinaryIO,
Iterable,
Optional,
TextIO,
Union,
Type,
cast,
overload,
Generator,
Tuple,
)
import logging
from warnings import warn
import random
from rdflib.namespace import Namespace, RDF
from rdflib import plugin, exceptions, query, namespace
import rdflib.term
from rdflib.term import BNode, IdentifiedNode, Node, URIRef, Literal, Genid
from rdflib.paths import Path
from rdflib.store import Store
from rdflib.serializer import Serializer
from rdflib.parser import InputSource, Parser, create_input_source
from rdflib.namespace import NamespaceManager
from rdflib.resource import Resource
from rdflib.collection import Collection
import rdflib.util # avoid circular dependency
from rdflib.exceptions import ParserError
import os
import shutil
import tempfile
import pathlib
from io import BytesIO
from urllib.parse import urlparse
from urllib.request import url2pathname
assert Literal # avoid warning
assert Namespace # avoid warning
logger = logging.getLogger(__name__)
__doc__ = """\
RDFLib defines the following kinds of Graphs:
* :class:`~rdflib.graph.Graph`
* :class:`~rdflib.graph.QuotedGraph`
* :class:`~rdflib.graph.ConjunctiveGraph`
* :class:`~rdflib.graph.Dataset`
Graph
-----
An RDF graph is a set of RDF triples. Graphs support the python ``in``
operator, as well as iteration and some operations like union,
difference and intersection.
see :class:`~rdflib.graph.Graph`
Conjunctive Graph
-----------------
A Conjunctive Graph is the most relevant collection of graphs that are
considered to be the boundary for closed world assumptions. This
boundary is equivalent to that of the store instance (which is itself
uniquely identified and distinct from other instances of
:class:`Store` that signify other Conjunctive Graphs). It is
equivalent to all the named graphs within it and associated with a
``_default_`` graph which is automatically assigned a :class:`BNode`
for an identifier - if one isn't given.
see :class:`~rdflib.graph.ConjunctiveGraph`
Quoted graph
------------
The notion of an RDF graph [14] is extended to include the concept of
a formula node. A formula node may occur wherever any other kind of
node can appear. Associated with a formula node is an RDF graph that
is completely disjoint from all other graphs; i.e. has no nodes in
common with any other graph. (It may contain the same labels as other
RDF graphs; because this is, by definition, a separate graph,
considerations of tidiness do not apply between the graph at a formula
node and any other graph.)
This is intended to map the idea of "{ N3-expression }" that is used
by N3 into an RDF graph upon which RDF semantics is defined.
see :class:`~rdflib.graph.QuotedGraph`
Dataset
-------
The RDF 1.1 Dataset, a small extension to the Conjunctive Graph. The
primary term is "graphs in the datasets" and not "contexts with quads"
so there is a separate method to set/retrieve a graph in a dataset and
to operate with dataset graphs. As a consequence of this approach,
dataset graphs cannot be identified with blank nodes, a name is always
required (RDFLib will automatically add a name if one is not provided
at creation time). This implementation includes a convenience method
to directly add a single quad to a dataset graph.
see :class:`~rdflib.graph.Dataset`
Working with graphs
===================
Instantiating Graphs with default store (Memory) and default identifier
(a BNode):
>>> g = Graph()
>>> g.store.__class__
<class 'rdflib.plugins.stores.memory.Memory'>
>>> g.identifier.__class__
<class 'rdflib.term.BNode'>
Instantiating Graphs with a Memory store and an identifier -
<http://rdflib.net>:
>>> g = Graph('Memory', URIRef("http://rdflib.net"))
>>> g.identifier
rdflib.term.URIRef('http://rdflib.net')
>>> str(g) # doctest: +NORMALIZE_WHITESPACE
"<http://rdflib.net> a rdfg:Graph;rdflib:storage
[a rdflib:Store;rdfs:label 'Memory']."
Creating a ConjunctiveGraph - The top level container for all named Graphs
in a "database":
>>> g = ConjunctiveGraph()
>>> str(g.default_context)
"[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']]."
Adding / removing reified triples to Graph and iterating over it directly or
via triple pattern:
>>> g = Graph()
>>> statementId = BNode()
>>> print(len(g))
0
>>> g.add((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((statementId, RDF.subject,
... URIRef("http://rdflib.net/store/ConjunctiveGraph"))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((statementId, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((statementId, RDF.object, Literal("Conjunctive Graph"))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> print(len(g))
4
>>> for s, p, o in g:
... print(type(s))
...
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
>>> for s, p, o in g.triples((None, RDF.object, None)):
... print(o)
...
Conjunctive Graph
>>> g.remove((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> print(len(g))
3
``None`` terms in calls to :meth:`~rdflib.graph.Graph.triples` can be
thought of as "open variables".
Graph support set-theoretic operators, you can add/subtract graphs, as
well as intersection (with multiplication operator g1*g2) and xor (g1
^ g2).
Note that BNode IDs are kept when doing set-theoretic operations, this
may or may not be what you want. Two named graphs within the same
application probably want share BNode IDs, two graphs with data from
different sources probably not. If your BNode IDs are all generated
by RDFLib they are UUIDs and unique.
>>> g1 = Graph()
>>> g2 = Graph()
>>> u = URIRef("http://example.com/foo")
>>> g1.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g1.add([u, namespace.RDFS.label, Literal("bar")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add([u, namespace.RDFS.label, Literal("bing")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> len(g1 + g2) # adds bing as label
3
>>> len(g1 - g2) # removes foo
1
>>> len(g1 * g2) # only foo
1
>>> g1 += g2 # now g1 contains everything
Graph Aggregation - ConjunctiveGraphs and ReadOnlyGraphAggregate within
the same store:
>>> store = plugin.get("Memory", Store)()
>>> g1 = Graph(store)
>>> g2 = Graph(store)
>>> g3 = Graph(store)
>>> stmt1 = BNode()
>>> stmt2 = BNode()
>>> stmt3 = BNode()
>>> g1.add((stmt1, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'r | dflib.graph.Graph'>)>
>>> g1.add((stmt1, RDF.subject,
... URIRef('http://rdflib.net/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g1.add((stmt1, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g1.add((stmt1, RDF.object, Literal('Conjunctive Graph'))) # doctest: +ELLIPSIS |
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add((stmt2, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add((stmt2, RDF.subject,
... URIRef('http://rdflib.net/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add((stmt2, RDF.predicate, RDF.type)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdf |
danpoland/pyramid-restful-framework | pyramid_restful/pagination/utilities.py | Python | bsd-2-clause | 964 | 0 | from pyramid.compat import urlparse
def replace_query_param(url, key, val):
"""
Given a URL and a key/val pair, set or replace an item in the query
parameters of the URL, and return the new URL.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(query)
query_dict[key] = [val]
query = urlparse.urlencode(sorted(list(query_dict.items())), doseq=True)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def remove_query_param(url, key):
"""
Given a URL and a key/val pa | ir, remove an item in the query
parameters of the URL, and return the new URL.
"""
(scheme, netloc, path, que | ry, fragment) = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(query)
query_dict.pop(key, None)
query = urlparse.urlencode(sorted(list(query_dict.items())), doseq=True)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
|
itucsdb1611/itucsdb1611 | templates_operations/personal/default.py | Python | gpl-3.0 | 8,587 | 0.005008 | from flask import render_template
from flask import url_for
from flask import redirect
from flask import request
from datetime import datetime
from flask_login import current_user, login_required
from classes.operations.person_operations import person_operations
from classes.operations.project_operations import project_operations
from classes.operations.followed_person_operations import | followed_person_operations
from classes.operations.personComment_operations import personComment_operations
from classes.look_up_tables import *
from classes.person import Person
from classes.operations.followed_project_operations import followed_project_operations
from classes.followed_project import FollowedProject
from classes.operations.team_operations import team_operations
from classes.operations.education_operations import edu | cation_operations
from classes.operations.skill_operations import skill_operations
from classes.operations.Experience_operations import experience_operations
from classes.operations.information_operations import information_operations
from classes.operations.language_operations import language_operations
from classes.operations.CV_operations import cv_operations
import os
from werkzeug.utils import secure_filename
from passlib.apps import custom_app_context as pwd_context
from templates_operations.user import*
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
def personal_default_page_config(request):
PersonProvider = person_operations()
Current_Person = PersonProvider.GetPerson(current_user.email)
comments = personComment_operations()
store_followed_projects = followed_project_operations()
EducationProvider = education_operations()
SkillProvider = skill_operations()
InformationProvider = information_operations()
LanguageProvider = language_operations()
TeamProvider = team_operations()
if request and 'delete' in request.form and request.method == 'POST':
p = PersonProvider.GetPersonByObjectId(request.form['delete'])
PersonProvider.DeletePerson(request.form['delete'])
if request and 'deleteComment' in request.form and request.method == 'POST':
comments.DeleteTeam(request.form['deleteComment'])
elif request and 'updateComment' in request.form and request.method == 'POST':
selectedComment = request.form['updateId']
updatedComment = request.form['updateComment']
comments.UpdatePersonComment(selectedComment, updatedComment)
elif request and 'addComment' in request.form and request.method == 'POST':
personId = Current_Person[0]
commentedPersonId = Current_Person[0]
newComment = request.form['addComment']
comments.AddPersonComment(personId, commentedPersonId, newComment)
elif 'unfollowProject' in request.form:
project_id = request.form['unfollowProject']
store_followed_projects.DeleteFollowedProject(project_id)
elif request and 'searchPeoplePage' in request.form and request.method == 'POST':
return redirect(url_for('site.people_search_person_page'))
elif request and 'searchProjectPage' in request.form and request.method == 'POST':
return redirect(url_for('site.projects_search_page'))
elif request and 'saveProfileSettings' in request.form and request.method == 'POST':
FollowedPersonProvider = followed_person_operations()
listFollowing = FollowedPersonProvider.GetFollowedPersonListByPersonId(Current_Person[0])
listFollowers = FollowedPersonProvider.GetFollowedPersonListByFollowedPersonId(Current_Person[0])
personComments = comments.GetPersonCommentsByCommentedPersonId(Current_Person[0])
listTitle = GetTitleList()
listAccount = GetAccountTypeList()
first_name = request.form['firstName']
last_name = request.form['lastName']
pswd = request.form['pswd']
accountType = request.form['account']
title = request.form['title']
file = request.files['file']
gender = request.form['r1']
if gender == 'male':
gender = False
elif gender == 'female':
gender = True
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
if filename != Current_Person[7]:
file.save(os.path.join('static/user_images', filename))
else:
filename = Current_Person[7]
elif Current_Person[7] is None:
if gender:
filename = 'noimage_female.jpg'
else:
filename = 'noimage_male.jpg'
else:
filename = Current_Person[7]
if pswd != "":
pswd = pwd_context.encrypt(request.form['pswd'])
UpdateUser(pswd, current_user.email)
PersonProvider.UpdatePerson(Current_Person[0], first_name, last_name, accountType, ' ', gender, title, filename, False)
return redirect(url_for('site.personal_default_page', Current_Person=Current_Person,
listFollowing=listFollowing, listFollowers=listFollowers,
personComments=personComments, listAccount=listAccount, listTitle=listTitle))
FollowedPersonProvider = followed_person_operations()
listFollowing = FollowedPersonProvider.GetFollowedPersonListByPersonId(Current_Person[0])
listFollowers = FollowedPersonProvider.GetFollowedPersonListByFollowedPersonId(Current_Person[0])
personComments = comments.GetPersonCommentsByCommentedPersonId(Current_Person[0])
followed_projects = store_followed_projects.GetFollowedProjectListByPersonId(Current_Person[0])
count = 0
while (count < len(followed_projects)):
temp = list(followed_projects[count])
temp.append(list(TeamProvider.GetAllMembersByProjectId(followed_projects[count][8])))
temp.append(len(store_followed_projects.GetFollowerPersonListByFollowedProjectId(followed_projects[count][8])))
followed_projects[count] = tuple(temp)
count = count + 1
now = datetime.datetime.now()
listTitle = GetTitleList()
listAccount = GetAccountTypeList()
store_projects = project_operations()
active_projects = store_projects.get_the_projects_of_a_person(Current_Person[0])
count = 0
while (count < len(active_projects)):
temp = list(active_projects[count])
temp.append(list(TeamProvider.GetAllMembersByProjectId(active_projects[count][3])))
temp.append(len(store_followed_projects.GetFollowerPersonListByFollowedProjectId(active_projects[count][3])))
active_projects[count] = tuple(temp)
count = count + 1
active_project_number = len(active_projects)
listEducation = EducationProvider.GetEducationListByActiveCVAndByPersonId(Current_Person[0])
listSkill = SkillProvider.GetSkillByActiveCVAndByPersonId(Current_Person[0])
listLanguage = LanguageProvider.GetAllLanguagesByActiveCVAndByPersonId(Current_Person[0])
listInformation = InformationProvider.get_all_information_by_ActiveCV_And_PersonId(Current_Person[0])
CvProvider = cv_operations()
activeCv = CvProvider.get_active_cv(Current_Person[0])
ExperienceProvider=experience_operations()
if activeCv:
listExperience = ExperienceProvider.get_experiences_with_key(activeCv[0])
else:
listExperience = 'none'
return render_template('personal/default.html', current_time=now.ctime(), Current_Person=Current_Person,
listFollowing=listFollowing, listFollowers=listFollowers, followed_projects=followed_projects,
personComments=personComments, listAccount=listAccount, listTitle=listTitle,
active_projects=active_projects, active_project_number=active_project_number,listEducation=listEducation, listSkill=listSkill,
listExperience=listExperience, listLanguage=listLanguage, listInformation=listInformation)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# if submit_type == 'GET':
# store = followed_person_operations()
# result = store.GetFollowedPersonB |
Mitali-Sodhi/CodeLingo | Dataset/python/test_utils_sitemap.py | Python | mit | 5,404 | 0.003331 | import unittest
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
class SitemapTest(unittest.TestCase):
def test_sitemap(self):
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc>http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc>http://www.example.com/Special-Offers.html</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
</urlset>""")
assert s.type == 'urlset'
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'}, {'priority': '0.8', 'loc': 'http://www.example.com/Special-Offers.html', 'lastmod': '2009-08-16', 'changefreq': 'weekly'}])
def test_sitemap_index(self):
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>http://www.example.com/sitemap1.xml.gz</loc>
<lastmod>2004-10-01T18:23:17+00:00</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml.gz</loc>
<lastmod>2005-01-01</lastmod>
</sitemap>
</sitemapindex>""")
assert s.type == 'sitemapindex'
self.assertEqual(list(s), [{'loc': 'http://www.example.com/sitemap1.xml.gz', 'lastmod': '2004-10-01T18:23:17+00:00'}, {'loc': 'http://www.example.com/sitemap2.xml.gz', 'lastmod': '2005-01-01'}])
| def test_sitemap_strip(self):
"""Assert we can deal with trailing spaces inside <loc> tags - we've
seen those
"""
| s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_wrong_ns(self):
"""We have seen sitemaps with wrongs ns. Presumably, Google still works
with these, though is not 100% confirmed"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_wrong_ns2(self):
"""We have seen sitemaps with wrongs ns. Presumably, Google still works
with these, though is not 100% confirmed"""
s = Sitemap("""<?xml version="1.0" encoding="UTF-8"?>
<urlset>
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
""")
assert s.type == 'urlset'
self.assertEqual(list(s),
[{'priority': '1', 'loc': 'http://www.example.com/', 'lastmod': '2009-08-16', 'changefreq': 'daily'},
{'loc': 'http://www.example.com/2', 'lastmod': ''},
])
def test_sitemap_urls_from_robots(self):
robots = """User-agent: *
Disallow: /aff/
Disallow: /wl/
# Search and shopping refining
Disallow: /s*/*facet
Disallow: /s*/*tags
# Sitemap files
Sitemap: http://example.com/sitemap.xml
Sitemap: http://example.com/sitemap-product-index.xml
# Forums
Disallow: /forum/search/
Disallow: /forum/active/
"""
self.assertEqual(list(sitemap_urls_from_robots(robots)),
['http://example.com/sitemap.xml', 'http://example.com/sitemap-product-index.xml'])
def test_sitemap_blanklines(self):
"""Assert we can deal with starting blank lines before <xml> tag"""
s = Sitemap("""\
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<!-- cache: cached = yes name = sitemap_jspCache key = sitemap -->
<sitemap>
<loc>http://www.example.com/sitemap1.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap3.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<!-- end cache -->
</sitemapindex>
""")
self.assertEqual(list(s), [
{'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap1.xml'},
{'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap2.xml'},
{'lastmod': '2013-07-15', 'loc': 'http://www.example.com/sitemap3.xml'},
])
if __name__ == '__main__':
unittest.main()
|
kumar303/rockit | vendor-local/boto/cloudformation/template.py | Python | bsd-3-clause | 1,318 | 0.002276 | from boto.resultset import ResultSet
class Template:
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.template_parameters = None
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.template_parameters = ResultSet([('member', TemplateParameter)])
return self.template_parameters
else:
return None
def endElement(self, name, value, connection):
if name == "Description":
| self.description = value
else:
setattr(self, name, value)
class TemplateParameter:
def __init__(self, parent):
self.parent = parent
self.default_value = None
self.descriptio | n = None
self.no_echo = None
self.parameter_key = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "DefaultValue":
self.default_value = value
elif name == "Description":
self.description = value
elif name == "NoEcho":
self.no_echo = bool(value)
elif name == "ParameterKey":
self.parameter_key = value
else:
setattr(self, name, value)
|
jirikuncar/invenio-demosite | invenio_demosite/base/recordext/functions/get_creation_date.py | Python | gpl-2.0 | 1,041 | 0.012488 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## M | ERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
def get_creation_date(recid):
"""
Returns creation date for given record.
@p | aram recid:
@return: Creation date
"""
from invenio.modules.records.models import Record as Bibrec
return Bibrec.query.get(recid).creation_date
|
michogar/FullDiskAlert | setup.py | Python | gpl-3.0 | 399 | 0.002506 | __a | uthor__ = 'michogarcia'
from setuptools import setup, find_packages
version = '0.1'
setup(name='FullDiskAlert',
version=version,
author="Micho Garcia",
author_email="micho.garcia@geomati.co",
license="LICENSE.txt",
description="Sends mail when disk is above threshold",
packages=find_packages(),
install_requires=[
'pyyaml',
],
| ) |
insomnia-lab/libreant | webant/agherant_standalone.py | Python | agpl-3.0 | 591 | 0 | from flask import Flask, request
from flask_bootstrap import Bootstrap
from flask_babel import Babel
import agherant
from webserver_utils import gevent_run
def create_app(conf):
app = Flask(__name__)
app.config.update(conf)
Bootstrap(app)
babel = Babel(app)
app.register_blueprint(agherant.agherant, url_prefix='/agherant')
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(['en', 'it', 'sq'])
return app
def main(conf={}):
ap | p = create_app(conf)
gevent_run( | app)
if __name__ == '__main__':
main()
|
nazrulworld/mailbox | application/validators.py | Python | mit | 565 | 0.00354 | # _*_ | coding: utf-8 _*_
__author__ = 'nis | lam <connect2nazrul@gmail.com>'
import re
from wtforms.validators import ValidationError
class EmailProviders(object):
def __init__(self, message=None):
if message is None:
message = u'We are only accept Gmail or Yahoo or AOL or Outlook'
self.message = message
self.pattern = r'@(gmail|yahoo|aol|aim|outlook|live|hotmail)\.(com|net|org)'
def __call__(self, form, field):
if re.search(self.pattern, field.data) is None:
raise ValidationError(self.message)
|
tyarkoni/pliers | pliers/extractors/audio.py | Python | bsd-3-clause | 23,156 | 0.002202 | ''' Extractors that operate on AudioStim inputs. '''
from abc import ABCMeta
from os import path
import sys
import logging
import numpy as np
from scipy import fft
import pandas as pd
from pliers.stimuli.audio import AudioStim
from pliers.stimuli.text import ComplexTextStim
from pliers.extractors.base import Extractor, ExtractorResult
from pliers.utils import attempt_to_import, verify_dependencies, listify
from pliers.support.exceptions import MissingDependencyError
from pliers.support.setup_yamnet import YAMNET_PATH
librosa = attempt_to_import('librosa')
tf = attempt_to_import('tensorflow')
class AudioExtra | ctor(Extractor):
''' Base Audio Extractor class; all subclasses can only be applied to
audio. '''
_inpu | t_type = AudioStim
class STFTAudioExtractor(AudioExtractor):
''' Short-time Fourier Transform extractor.
Args:
frame_size (float): The width of the frame/window to apply an FFT to,
in seconds.
hop_size (float): The step size to increment the window by on each
iteration, in seconds (effectively, the sampling rate).
freq_bins (list or int): The set of bins or frequency bands to extract
power for. If an int is passed, this is the number of bins
returned, with each bin spanning an equal range of frequencies.
E.g., if bins=5 and the frequency spectrum runs from 0 to 20KHz,
each bin will span 4KHz. If a list is passed, each element must be
a tuple or list of lower and upper frequency bounds. E.g., passing
[(0, 300), (300, 3000)] would compute power in two bands, one
between 0 and 300Hz, and one between 300Hz and 3KHz.
spectrogram (bool): If True, plots a spectrogram of the results.
Notes: code adapted from
http://stackoverflow.com/questions/2459295/invertible-stft-and-istft-in-python
'''
_log_attributes = ('frame_size', 'hop_size', 'freq_bins')
VERSION = '1.0'
def __init__(self, frame_size=0.5, hop_size=0.1, freq_bins=5,
spectrogram=False):
self.frame_size = frame_size
self.hop_size = hop_size
self.spectrogram = spectrogram
self.freq_bins = freq_bins
super().__init__()
def _stft(self, stim):
x = stim.data
framesamp = int(self.frame_size * stim.sampling_rate)
hopsamp = int(self.hop_size * stim.sampling_rate)
w = np.hanning(framesamp)
X = np.array([fft(w * x[i:(i + framesamp)])
for i in range(0, len(x) - framesamp, hopsamp)])
nyquist_lim = int(X.shape[1] // 2)
X = np.log(X[:, :nyquist_lim])
X = np.absolute(X)
if self.spectrogram:
import matplotlib.pyplot as plt
bins = np.fft.fftfreq(framesamp, d=1. / stim.sampling_rate)
bins = bins[:nyquist_lim]
plt.imshow(X.T, origin='lower', aspect='auto',
interpolation='nearest', cmap='RdYlBu_r',
extent=[0, stim.duration, bins.min(), bins.max()])
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.colorbar()
plt.show()
return X
def _extract(self, stim):
data = self._stft(stim)
time_bins = np.arange(0., stim.duration - self.frame_size,
self.hop_size)
if isinstance(self.freq_bins, int):
bins = []
bin_size = int(data.shape[1] / self.freq_bins)
for i in range(self.freq_bins):
if i == self.freq_bins - 1:
bins.append((i * bin_size, data.shape[1]))
else:
bins.append((i * bin_size, (i + 1) * bin_size))
self.freq_bins = bins
features = ['%d_%d' % fb for fb in self.freq_bins]
offset = 0.0 if stim.onset is None else stim.onset
index = [tb + offset for tb in time_bins]
values = np.zeros((len(index), len(features)))
for i, fb in enumerate(self.freq_bins):
start, stop = fb
values[:, i] = data[:, start:stop].mean(1)
values[np.isnan(values)] = 0.
values[np.isinf(values)] = 0.
return ExtractorResult(values, stim, self, features=features,
onsets=index, durations=self.hop_size,
orders=list(range(len(index))))
class MeanAmplitudeExtractor(Extractor):
''' Mean amplitude extractor for blocks of audio with transcription. '''
_input_type = (AudioStim, ComplexTextStim)
def _extract(self, stim):
amps = stim.audio.data
sampling_rate = stim.audio.sampling_rate
elements = stim.complex_text.elements
values, onsets, durations = [], [], []
for i, el in enumerate(elements):
onset = sampling_rate * el.onset
onsets.append(onset)
duration = sampling_rate * el.duration
durations.append(duration)
r_onset = np.round(onset).astype(int)
r_offset = np.round(onset + duration).astype(int)
if not r_offset <= amps.shape[0]:
raise Exception('Block ends after data.')
mean_amplitude = np.mean(amps[r_onset:r_offset])
values.append(mean_amplitude)
orders = list(range(len(elements)))
return ExtractorResult(values, stim, self, features=['mean_amplitude'],
onsets=onsets, durations=durations,
orders=orders)
class LibrosaFeatureExtractor(AudioExtractor, metaclass=ABCMeta):
''' A generic class for audio extractors using the librosa library. '''
_log_attributes = ('hop_length', 'librosa_kwargs')
def __init__(self, feature=None, hop_length=512, **librosa_kwargs):
verify_dependencies(['librosa'])
if feature:
self._feature = feature
self.hop_length = hop_length
self.librosa_kwargs = librosa_kwargs
super().__init__()
def get_feature_names(self):
return self._feature
def _get_values(self, stim):
if self._feature in ['zero_crossing_rate', 'rms', 'spectral_flatness']:
return getattr(librosa.feature, self._feature)(
y=stim.data, hop_length=self.hop_length, **self.librosa_kwargs)
elif self._feature == 'tonnetz':
return getattr(librosa.feature, self._feature)(
y=stim.data, sr=stim.sampling_rate, **self.librosa_kwargs)
elif self._feature in[ 'onset_detect', 'onset_strength_multi']:
return getattr(librosa.onset, self._feature)(
y=stim.data, sr=stim.sampling_rate, hop_length=self.hop_length,
**self.librosa_kwargs)
elif self._feature in[ 'tempo', 'beat_track']:
return getattr(librosa.beat, self._feature)(
y=stim.data, sr=stim.sampling_rate, hop_length=self.hop_length,
**self.librosa_kwargs)
elif self._feature in[ 'harmonic', 'percussive']:
return getattr(librosa.effects, self._feature)(
y=stim.data,
**self.librosa_kwargs)
else:
return getattr(librosa.feature, self._feature)(
y=stim.data, sr=stim.sampling_rate, hop_length=self.hop_length,
**self.librosa_kwargs)
def _extract(self, stim):
values = self._get_values(stim)
if self._feature=='beat_track':
beats=np.array(values[1])
values=beats
values = values.T
n_frames = len(values)
feature_names = listify(self.get_feature_names())
onsets = librosa.frames_to_time(range(n_frames),
sr=stim.sampling_rate,
hop_length=self.hop_length)
onsets = onsets + stim.onset if stim.onset else onsets
durations = [self.hop_length / float(stim.sampling_rate)] * n_frames
return ExtractorResult(values, stim, self, features=feature_names,
onsets=onsets, durations=durations,
|
lgrech/MapperPy | mapperpy/attributes_util.py | Python | bsd-3-clause | 1,031 | 0.00194 | import inspect
def get_attributes(obj):
if isinstance(obj, dict):
return obj.keys( | )
attributes = inspect.getmembers(obj, lambda a: not(inspect.isroutine(a | )))
return [attr[0] for attr in attributes if not(attr[0].startswith('__') and attr[0].endswith('__'))]
class AttributesCache(object):
    """Memoizes the attribute names of the most recently inspected class.

    Successive calls with instances of the same class reuse the cached
    name set instead of re-running attribute discovery.
    """

    def __init__(self, get_attributes_func=get_attributes):
        self.__cached_class = None
        self.__cached_class_attrs = None
        self.__get_attributes_func = get_attributes_func

    def get_attrs_update_cache(self, obj):
        """Return obj's attribute names as a set, refreshing the cache when
        obj's class differs from the cached one; dicts are never cached."""
        if isinstance(obj, dict):
            return set(obj.keys())
        cache_valid = (self.__cached_class_attrs is not None
                       and isinstance(obj, self.__cached_class))
        if not cache_valid:
            self.__update_source_class_cache(obj)
        return self.__cached_class_attrs

    def __update_source_class_cache(self, obj):
        """Re-point the cache at obj's class and its discovered attribute names."""
        self.__cached_class = type(obj)
        self.__cached_class_attrs = set(self.__get_attributes_func(obj))
|
daniel-yavorovich/django-redmine-auth-backend | setup.py | Python | apache-2.0 | 382 | 0 | from distutils.core import setup
# Package metadata for installation via pip/distutils.
setup(
    name='django-redmine-auth-backend',
    version='0.1',
    packages=['redmine_auth'],
    url='https://github.com/daniel-yavorovich/django-redmine-auth-backend',
    license='Apache V2 License',
    author='daniel',
    author_email='daniel@quietsupport.net',
    # NOTE(review): sentence looks truncated -- confirm intended description text.
    description='A Django authentication backend for use with the Redmine'
)
|
frrmack/CallofCthulhu | cardheap.py | Python | mit | 6,455 | 0.010225 | from card import Card
from util import *
from layout import *
import pygame
class CardHeap(list):
    """A pile of cards, ordered bottom (index 0) to top (last index)."""

    def __init__(self):
        super(CardHeap, self).__init__()

    # -- Actions
    def add(self, card):
        """Place card on top of the heap and record its new position."""
        self.append(card)
        card.position = self

    def remove(self, card):
        """Take card off the heap; it no longer has a position."""
        super(CardHeap, self).remove(card)
        card.position = None

    def putInBottom(self, card):
        """Slide card underneath the heap (index 0)."""
        self[:0] = [card]
class Deck(CardHeap):
    """A named pile of cards (a draw deck)."""

    def __init__(self, name='Unnamed Deck'):
        super(Deck, self).__init__()
        self.name = name
class Hand(CardHeap):
    """The cards a player currently holds, drawn fanned out at the right
    edge of the screen (bottom for Player 1, top for Player 2)."""
    def __init__(self, player=None):
        self.player = player
        self.rect = pygame.Rect(0,0,0,0)  # screen area the fan currently occupies
        if player != None and player.game != None:
            self.screen = self.player.game.screen
    def add(self, card):
        """Put card into the hand, rescale its image to hand size, redraw."""
        CardHeap.add(self,card)
        # Player 2's cards are kept hidden (see unhide() in remove()).
        if self.player.position == "Player 2":
            card.image.hide()
        self.screen = self.player.screen
        card.image.surface = scale(card.image.orgSurface, size=(HANDCARDWIDTH, HANDCARDHEIGHT))
        card.image.backSurface = scale(card.image.orgBackSurface, size=(HANDCARDWIDTH, HANDCARDHEIGHT))
        self.redraw()
    def remove(self, card):
        """Take card out of the hand and restore its full-size image."""
        CardHeap.remove(self, card)
        if self.player.position == "Player 2":
            card.image.unhide()
        self.screen = self.player.screen
        if card.image in self.screen.drawnImages:
            self.screen.drawnImages.remove(card.image)
        card.image.surface = scale(card.image.orgSurface, size=(CARDWIDTH, CARDHEIGHT))
        card.image.backSurface = scale(card.image.orgBackSurface, size=(CARDWIDTH, CARDHEIGHT))
        self.redraw()
    def belongToPlayer(self, player):
        """Attach this hand to player and cache the game screen."""
        self.player = player
        self.screen = self.player.game.screen
    def get_pos(self):
        """Return the (x, y) of the rightmost hand card; y depends on which
        screen edge (bottom/top) the player's hand lives on."""
        self.screen = self.player.game.screen
        x = self.screen.width - HANDCARDWIDTH
        if self.player.position == "Player 1":
            y = self.screen.height - HANDCARDHEIGHT
        elif self.player.position == "Player 2":
            y = 0
        else:
            raise GameError("Only available player positions are Player 1 and Player 2.")
        return (x,y)
    def draw(self):
        """Blit the fan right-to-left, overlapping cards as the hand grows."""
        self.screen = self.player.game.screen
        x,y = self.pos = self.get_pos()
        if len(self) == 1:
            step = 0
        else:
            # Horizontal offset between neighbouring cards, so the fan
            # spreads across at most HANDMAXWIDTH.
            step = toInt( (HANDMAXWIDTH-HANDCARDWIDTH) / (len(self)-1.) )
        step = trunc(step, top=MAXHANDSTEP)  # presumably clamps step to MAXHANDSTEP -- confirm trunc() semantics
        self.rect = pygame.Rect(x-(len(self)-1)*step,y,HANDCARDWIDTH+(len(self)-1)*step,HANDCARDHEIGHT)
        # NOTE(review): both branches are identical -- hiding Player 2's cards
        # is handled by card.image.hide() in add(), not here.
        if self.player.position == "Player 1": #show cards
            for i in range(len(self)-1,-1,-1):
                pos = (x-step*i, y)
                self[len(self)-1-i].image.draw(pos)
        elif self.player.position == "Player 2": #don't show cards
            for i in range(len(self)-1,-1,-1):
                pos = (x-step*i, y)
                self[len(self)-1-i].image.draw(pos)
    def clear(self):
        """Erase the hand area and unregister the card images from the screen."""
        self.screen = self.player.game.screen
        self.screen.blit(self.screen.background.subsurface(self.rect),self.rect)
        for card in self:
            # BUG FIX: previously tested `card in drawnImages` while removing
            # card.image, so images were never found and never removed; test
            # the image itself, consistent with remove() and DiscardPile.clear.
            if card.image in self.screen.drawnImages:
                self.screen.drawnImages.remove(card.image)
    def redraw(self):
        self.clear()
        self.draw()
class DiscardPile(CardHeap):
    """A player's discard pile, drawn along the bottom (Player 1) or top
    (Player 2) edge of the screen.

    Cards entering the pile have their state reset and are shown rotated
    (turnLeft) at discard size; cards leaving it are restored upright at
    normal card size.
    """
    def __init__(self, player=None):
        self.player = player
        self.rect = pygame.Rect(0,0,0,0)  # panel area currently occupied
        if player != None and player.game != None:
            self.screen = self.player.game.screen
    def add(self, card):
        """Discard card: reset its state, rescale/rotate its image, redraw."""
        CardHeap.add(self,card)
        self.screen = self.player.screen
        # Discarded cards return to their default state.
        if card.isInsane():
            card.restore()
        if card.isExhausted():
            card.ready()
        card.wounds = 0
        card.image.surface = scale(card.image.orgSurface, size=(DISCARDWIDTH, DISCARDHEIGHT))
        card.image.backSurface = scale(card.image.orgBackSurface, size=(DISCARDWIDTH, DISCARDHEIGHT))
        card.image.bigSurface = scale(card.image.orgSurface, size=card.image.zoomSize)
        card.image.turnLeft()  # pile shows cards sideways
        self.redraw()
    def remove(self, card):
        """Take card out of the pile and restore its upright, full-size image.

        NOTE(review): bypasses CardHeap.remove, so card.position is left
        pointing at this pile -- presumably the caller re-homes the card;
        confirm.
        """
        list.remove(self, card)
        if graphicsOn(self.player):
            self.screen = self.player.screen
            if card.image in self.screen.drawnImages:
                self.screen.drawnImages.remove(card.image)
            card.image.surface = scale(card.image.orgSurface, size=(CARDWIDTH, CARDHEIGHT))
            card.image.backSurface = scale(card.image.orgBackSurface, size=(CARDWIDTH, CARDHEIGHT))
            card.image.turnLeft()
            self.redraw()
    def get_rect(self):
        """Compute and cache the pile's panel rectangle (pos/width/height/rect)."""
        self.screen = self.player.game.screen
        x = self.player.domainPanel.get_width()
        x = trunc(x, bottom= DOMAINWIDTH + 8*RESOURCEBAR)
        if self.player.position == "Player 1":
            y = self.screen.height - DISCARDPANELHEIGHT
        elif self.player.position == "Player 2":
            y = 0
        else:
            raise GameError("Only available player positions are Player 1 and Player 2.")
        self.pos = (x,y)
        self.width = w = self.screen.width - RIGHTPANELWIDTH - 5*SMALLMARGIN - x
        self.height = h = DISCARDPANELHEIGHT
        self.rect = pygame.Rect(x,y,w,h)
        return self.rect
    def draw(self):
        """Blit the pile right-to-left, overlapping cards when space is short."""
        self.screen = self.player.game.screen
        x,y,w,h = self.get_rect()
        # Cards are rotated (turnLeft in add()), so DISCARDHEIGHT is their
        # on-screen horizontal extent; x is the rightmost card's position.
        x = x + w - DISCARDHEIGHT
        if len(self) > 1:
            # Spread evenly across the panel, but never further apart
            # than DISCARDSTEP.
            step = (w-DISCARDHEIGHT)//(len(self)-1)
            step = min(step, DISCARDSTEP)
        else:
            step = DISCARDSTEP
        # NOTE(review): both position branches are identical; kept for
        # symmetry with Hand.draw.
        if self.player.position == "Player 1":
            for i in range(len(self)):
                pos = (x - step*i, y)
                self[i].image.draw(pos)
        elif self.player.position == "Player 2":
            for i in range(len(self)):
                pos = (x - step*i, y)
                self[i].image.draw(pos)
        else:
            raise GameError("Only available player positions are Player 1 and Player 2.")
    def clear(self):
        """Unregister all card images, then erase the panel area."""
        for card in self:
            if card.image in self.screen.drawnImages:
                self.screen.drawnImages.remove(card.image)
        self.screen.blit(self.screen.background.subsurface(self.rect),self.rect)
    def redraw(self):
        self.screen = self.player.game.screen
        self.clear()
        self.draw()
|
Tefx/turnip | storage/auth/baidu_auth.py | Python | gpl-2.0 | 1,525 | 0.03541 | import httplib
import urllib
import json
# OAuth 2.0 application credentials for the Baidu open API.
# NOTE(review): a client secret is hard-coded and committed to the repo --
# security risk; consider loading these from configuration instead.
client_id = "UkDnoQzEWYdVMkvbtQeNfP0B"
client_secret = "LoooyWceGKIrxNyG0niwiwjCYLB8X0xw"
# Query parameters for the device-code request; "netdisk" scope is
# presumably Baidu's cloud-disk API -- confirm.
parameters_code = {"client_id": client_id,
                   "response_type": "device_code",
                   "scope": "netdisk"}
# Query parameters for the token request; "code" is filled in by get_token().
parameters_token = {"grant_type": "device_token",
                    "code": "",
                    "client_id": client_id,
                    "client_secret": client_secret}
def get_code():
    """Request an OAuth device/user code pair from Baidu and return the
    parsed JSON response."""
    connection = httplib.HTTPSConnection("openapi.baidu.com")
    query = urllib.urlencode(parameters_code)
    connection.request("GET", "/oauth/2.0/device/code?%s" % query)
    reply = connection.getresponse()
    return json.loads(reply.read())
def get_token(code):
    """Exchange the device code obtained from get_code() for an access token.

    Mutates the module-level parameters_token dict before issuing the
    request. Returns the parsed JSON response (token fields, or an
    "error" entry on failure).
    """
    parameters_token["code"] = code["device_code"]
    conn = httplib.HTTPSConnection("openapi.baidu.com")
    conn.request("GET", "/oauth/2.0/token?%s" % urllib.urlencode(parameters_token))
    response = conn.getresponse()
    return json.loads(response.read())
def auth():
code = get_code()
if "error" in code:
print "Get User Code failed: [%s] %s" % (code["error"], code["error_ | description"])
return
print "Your User Code is: %s" % code["user_code"]
print "Please open %s to finish the authorization" % code["verification_url"]
raw_input("And press any key to continue...")
token = get_token(code)
if "error" in token:
print "Get Access Token failed: [%s] %s" % (token["error"], token["error_description"])
return
print "Please add below information to your configuration!\n"
for k,v in token.iteritems():
print "%s=%s" % (k,v)
if __name__ == '__main__':
auth() |
foursquare/pants | tests/python/pants_test/backend/jvm/tasks/false.py | Python | apache-2.0 | 330 | 0.006061 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import sys

# This works just like /bin/false, but Windows users might not have that:
# exit immediately with a nonzero (failure) status.
sys.exit(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.