| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| salkinium/bachelor | experiment_control/commands/base.py | 1 | 1510 |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Niklas Hauser
# All rights reserved.
#
# The file is part of my bachelor thesis and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import logging
import os
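# Shared base class for experiment-control commands: it configures a common
# console + file logger exactly once per process (guarded by logger_initialized)
# and exposes an execute() hook for subclasses to override.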
class BaseCommand(object):
logger_initialized = False
def __init__(self, arguments=None, log_path='/var/log/boxmanager'):
super(BaseCommand, self).__init__()
self.arguments = arguments if arguments else []
self.log_path = log_path
self.logger = logging.getLogger('Command')
if not BaseCommand.logger_initialized:
self.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# console logging
self.ch = logging.StreamHandler()
self.ch.setLevel(logging.DEBUG)
self.ch.setFormatter(formatter)
self.logger.addHandler(self.ch)
# file logging
fh = logging.FileHandler(os.path.join(self.log_path, 'scriptmanager.log'))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
BaseCommand.logger_initialized = True
def execute(self, _):
return True
def __repr__(self):
return self.__str__()
def __str__(self):
return "BaseCommand()"
| bsd-2-clause | -1,765,331,765,717,810,700 | 29.2 | 97 | 0.580795 | false | 4.389535 | false | false | false |
| elitegreg/mudpy | tyderium/socket.py | 1 | 2832 |
from . import lib
from .timeout import Timeout
import greenlet
import errno
import socket as stdsocket
from socket import * # for convenience
from socket import timeout as timeout_error
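# Cooperative, non-blocking socket subclass: the underlying socket is switched to
# non-blocking mode and every blocking operation is retried after waiting for I/O
# readiness via lib.Io, with an optional per-call timeout enforced by Timeout.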
class socket(stdsocket.socket):
__slots__ = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setblocking(False)
def __wait(self, events, timeout=None):
try:
with Timeout(timeout if timeout else super().gettimeout()):
lib.Io(fd=self.fileno(), events=events).start()
except TimeoutError:
raise timeout_error
def connect(self, addr, timeout=None):
ret = self.connect_ex(addr)
if ret == 0:
return
if ret != errno.EINPROGRESS:
raise stdsocket.error(ret)
self.__wait(lib.EV_WRITE, timeout)
def send(self, value, timeout=None, *args, **kwargs):
while True:
try:
return super().send(value, *args, **kwargs)
except stdsocket.error as err:
if err.errno not in (errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR):
raise
self.__wait(lib.EV_WRITE, timeout)
def sendall(self, value, timeout=None, *args, **kwargs):
while True:
bytes = self.send(value, timeout, *args, **kwargs)
if bytes >= len(value):
return
value = value[bytes:]
def recv(self, size, timeout=None, *args, **kwargs):
while True:
fd = self.fileno()
if fd < 0:
return b''
self.__wait(lib.EV_READ, timeout)
try:
return super().recv(size, *args, **kwargs)
except stdsocket.error as err:
if err.errno in (errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR):
continue
raise
def accept(self, timeout=None):
while True:
self.__wait(lib.EV_READ, timeout)
try:
sock, addr = super().accept()
sock.setblocking(False)
sock.__class__ = socket
return sock, addr
except stdsocket.error as err:
if err.errno in (errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR):
continue
raise
if __name__ == '__main__':
from .hub import Hub
def get():
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', 8000))
sock.send(b'GET / HTTP/1.0\r\n\r\n') # wrong but ok for sample
sock.shutdown(SHUT_WR)
while True:
data = sock.recv(4096)
if not data:
break
print(data)
while True:
with Hub() as hub:
hub.spawn(get)
hub.switch()
| gpl-3.0 | 5,062,856,902,460,464,000 | 29.782609 | 83 | 0.521893 | false | 4.02845 | false | false | false |
| drm343/HalfDragon_Bot | v2/main.py | 1 | 5127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import androidhelper
import requests
import json
import telebot
import telebot.util as util
import sys
import time
import os
my_token = os.getenv("TELEGRAM_BOT_TOKEN", "")  # my_token was undefined in the original; this env var name is a placeholder
bot = telebot.TeleBot(my_token)
class MachineStatus:
def __init__(self):
self.phone = androidhelper.Android()
self.phone.batteryStartMonitoring()
self.chat_id = False
self.less_40 = False
self.less_20 = False
@util.async()
def monitor(self):
while True:
time.sleep(5)
            battery = self.phone.batteryGetLevel()[1]
            if ((battery / 10) <= 4) and self.chat_id and not self.less_40:
                bot.send_message(self.chat_id, "低於 40%")  # "below 40%"
                self.less_40 = True
            elif ((battery / 10) <= 1) and self.chat_id and not self.less_20:
                bot.send_message(self.chat_id, "快沒電了")  # "almost out of battery"
                self.less_20 = True
            elif ((battery / 10) >= 10) and self.chat_id and (self.less_20 or self.less_40):
                bot.send_message(self.chat_id, "充電完畢")  # "charging complete"
                self.less_20 = False
                self.less_40 = False
status = MachineStatus()
status.monitor()
def is_someuser(username):
return lambda message: (message.chat.username == username)
def get_parameters(message):
try:
return message.split(" ", 1)[1]
except:
return ""
is_drm343 = is_someuser("drm343")
is_DummyData = is_someuser("DummyData")
class FTP:
def __init__(self):
self.host = "ftp://domain"
self.port = 22
self.username = "username"
self.password = "password"
def split_host_and_port(self, message):
host, self.port = message.split(":", 1)
self.host = "ftp://{0}".format(host)
def message(self, message):
return message.format(self.host, self.port, self.username, self.password)
def change_start(self, message):
msg = bot.reply_to(message, "是否更改主機位置或 port?目前為 {0}:{1} (y/N)".format(self.host, self.port))
bot.register_next_step_handler(msg, self.pre_change_host)
def pre_change_host(self, message):
Y = message.text
if (Y == "Y") or (Y == "y"):
msg = bot.reply_to(message, """\
請輸入新的主機位置
格式為「主機:port」
不需要輸入開頭 ftp://""".format(self.host))
bot.register_next_step_handler(msg, self.post_change_host)
else:
self.pre_change_username(message)
def post_change_host(self, message):
self.split_host_and_port(message.text)
self.pre_change_username(message)
def pre_change_username(self, message):
msg = bot.reply_to(message, "請輸入帳號?目前為:{0}".format(self.username))
bot.register_next_step_handler(msg, self.post_change_username)
def post_change_username(self, message):
self.username = message.text
self.pre_change_password(message)
def pre_change_password(self, message):
msg = bot.reply_to(message, "請輸入密碼?目前為:\"{0}\"(沒有\")".format(self.password))
bot.register_next_step_handler(msg, self.post_change_password)
def post_change_password(self, message):
self.password = message.text
chat_id = message.chat.id
bot.send_message(chat_id, "更新完成")
ftp = FTP()
class HalfDragonBot:
# command
@bot.message_handler(commands=["start"])
def start_bot(message):
if is_drm343(message):
status.chat_id = message.chat.id
bot.send_message(status.chat_id, "已設定完成")
@bot.message_handler(commands=["rules"])
def show_rules(message):
HELP_MESSAGE = """\
目前僅供半龍史萊姆群組使用(暫定)
加入第一句話必須說「我是新手」
邀請連結
某甲髒髒ftp
主機位置: {0}
Port: {1}
帳號: {2}
密碼: {3}
半龍史萊姆論壇"""
bot.reply_to(message, ftp.message(HELP_MESSAGE))
@bot.message_handler(commands=["set_ftp"])
def set_ftp(message):
if is_drm343(message) or is_DummyData(message):
ftp.change_start(message)
else:
bot.reply_to(message, "你沒有修改權限")
@bot.message_handler(commands=["output"])
def count_probability(message):
result = my_test(get_parameters(message.text))
response_message = ""
try:
response_message = "結果 | 機率\n"
next_message = "{0} | {1}\n"
for item in result["distributions"]["data"][0]:
response_message = response_message + next_message.format(item[0], item[1])
except:
response_message = result["error"]["message"]
bot.reply_to(message, response_message)
def my_test(message):
parameters = {"program":"output+{0}".format(message)}
response = requests.post("http://anydice.com/calculator_limited.php", data = parameters)
result = json.loads(response.text)
return result
if __name__ == '__main__':
while True:
try:
bot.polling()
except:
pass
| mit | -4,120,896,432,437,278,700 | 25.972222 | 100 | 0.59691 | false | 3.015528 | false | false | false |
| llinmeng/PythonStudy | python_project/23/newsagent2.py | 1 | 4783 |
# -*- coding: utf-8 -*-
from nntplib import NNTP
from time import strftime, time, localtime
from email import message_from_string
from urllib import urlopen
import textwrap
import re
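# A small news-gathering framework: NewsAgent collects NewsItem objects from its
# sources (NNTPSource, SimpleWebSource) and hands them to its destinations
# (PlainDestination, HTMLDestination).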
day = 24 * 60 * 60  # number of seconds in one day
def wrap(string, max=70):
    """
    Wrap a string to the given maximum line width.
    :param string: the text to wrap
    :param max: maximum line width
    :return: the wrapped text, terminated by a newline
    """
    return '\n'.join(textwrap.wrap(string, max)) + '\n'
class NewsAgent:
"""
可以将新闻来源获取新闻项目并且发布到新闻目标的对象
"""
def __init__(self):
self.sources = []
self.destination = []
def addSource(self, source):
self.sources.append(source)
def addDestination(self, dest):
self.destination.append(dest)
def distribute(self):
"""
从所有来源获取所有新闻项目并且发布到所有目标
"""
items= []
for source in self.sources:
items.extend(source.getItems())
for dest in self.destination:
dest.receiveItems(items)
class NewsItem:
"""
包括标题和主体文本的简单新闻项目
"""
def __init__(self, title, body):
self.title = title
self.body = body
class NNTPSource:
"""
从NNTP组中获取新闻项目的新闻来源
"""
def __init__(self, servername, group, window):
self.servername = servername
self.group = group
self.window = window
def getItems(self):
start = localtime(time() - self.window * day)
date = strftime('%y%m%d', start)
        hour = strftime('%H%M%S', start)
server = NNTP(self.servername)
        ids = server.newnews(self.group, date, hour)[1]
for id in ids:
lines = server.article(id)[3]
message = message_from_string('\n'.join(lines))
title = message['subject']
            body = message.get_payload()
if message.is_multipart():
body = body[0]
yield NewsItem(title, body)
server.quit()
class SimpleWebSource:
"""
使用正则表达式从网页中提取新闻项目的新闻来源
"""
def __init__(self, url,titlePattern, bodyPattern):
self.url = url
self.titlePattern = titlePattern
self.bodyPattern = bodyPattern
def getItems(self):
text = urlopen(self.url).read()
titles = self.titlePattern.findall(text)
bodies = self.bodyPattern.findall(text)
for title, body in zip(titles, bodies):
yield NewsItem(title, wrap(body))
class PlainDestination:
"""
将所有新闻项目格式化为纯文本的新闻目标类
"""
def receiveItems(self, items):
for item in items:
print (item.title)
print ('-'*len(item.title))
print (item.body)
class HTMLDestination:
"""
将所有新闻项目格式化为HTML的目标类
"""
def __init__(self, filename):
self.filename = filename
def receiveItems(self, items):
out = open(self.filename, 'w')
print >> out, """
<html>
<head>
<title>Today's News</title>
</head>
<body>
        <h1>Today's News</h1>
"""
print >> out, '<ul>'
id = 0
for item in items:
id += 1
print >> out, '<li><a href = "#%i">%s</a></li>' % (id, item.title)
print >> out, '</ul>'
id = 0
for item in items:
id += 1
print >> out, '<h2><a name = "%i">%s</a></h2>' % (id, item.title)
print >> out, '<pre>%s</pre>' % item.body
print >> out, """
</body>
</html>
"""
def runDefaultSetup():
    """
    Default setup of sources and destinations; modify it to suit your needs.
    :return:
    """
agent = NewsAgent()
    # A SimpleWebSource that fetches news from the BBC news site:
    bbc_url = 'http://news.bbc.co.uk/text_only.stm'
    bbc_title = r'(?s)a href="[^"]*">\s*<b>\s*(.*?)\s*</b>'
    bbc_body = r'(?s)</a>\s*<br />\s*(.*?)\s*<'
    bbc = SimpleWebSource(bbc_url, re.compile(bbc_title), re.compile(bbc_body))
    agent.addSource(bbc)
    # An NNTPSource that fetches news from comp.lang.python.announce
clpa_server = 'news.foo.bar' # Insert real server name
clpa_group = 'comp.lang.python.announce'
clpa_window = 1
clpa = NNTPSource(clpa_server, clpa_group, clpa_window)
agent.addSource(clpa)
    # Add the plain-text and HTML destinations
agent.addDestination(PlainDestination())
agent.addDestination(HTMLDestination('news.html'))
    # Distribute the news items
agent.distribute()
if __name__ == '__main__':
runDefaultSetup()
| mit | -7,183,054,910,938,134,000 | 22.989071 | 83 | 0.525177 | false | 3.097389 | false | false | false |
| MattDevo/edk2 | BaseTools/Source/Python/Eot/EotMain.py | 1 | 69215 |
## @file
# This file is used to be the main entrance of EOT tool
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os, time, glob
import Common.EdkLogger as EdkLogger
import Eot.EotGlobalData as EotGlobalData
from optparse import OptionParser
from Common.StringUtils import NormPath
from Common import BuildToolError
from Common.Misc import GuidStructureStringToGuidString, sdict
from Eot.Parser import *
from Eot.InfParserLite import EdkInfParser
from Common.StringUtils import GetSplitValueList
from Eot import c
from Eot import Database
from array import array
from Eot.Report import Report
from Common.BuildVersion import gBUILD_VERSION
from Eot.Parser import ConvertGuid
from Common.LongFilePathSupport import OpenLongFilePath as open
import struct
import uuid
import copy
import codecs
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
gGuidStringFormat = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X"
gIndention = -4
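## Image() class
#
# A class for the basic image buffer; subclasses such as Ffs, Section and
# FirmwareVolume unpack their own headers and sub-images out of it
#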
class Image(array):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = _HEADER_.size
def __new__(cls, *args, **kwargs):
return array.__new__(cls, 'B')
def __init__(self, ID=None):
if ID is None:
self._ID_ = str(uuid.uuid1()).upper()
else:
self._ID_ = ID
self._BUF_ = None
self._LEN_ = None
self._OFF_ = None
self._SubImages = sdict() # {offset: Image()}
array.__init__(self)
def __repr__(self):
return self._ID_
def __len__(self):
Len = array.__len__(self)
for Offset in self._SubImages.keys():
Len += len(self._SubImages[Offset])
return Len
def _Unpack(self):
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _Pack(self, PadByte=0xFF):
raise NotImplementedError
def frombuffer(self, Buffer, Offset=0, Size=None):
self._BUF_ = Buffer
self._OFF_ = Offset
# we may need the Size information in advance if it's given
self._LEN_ = Size
self._LEN_ = self._Unpack()
def empty(self):
del self[0:]
def GetField(self, FieldStruct, Offset=0):
return FieldStruct.unpack_from(self, Offset)
def SetField(self, FieldStruct, Offset, *args):
# check if there's enough space
Size = FieldStruct.size
if Size > len(self):
self.extend([0] * (Size - len(self)))
FieldStruct.pack_into(self, Offset, *args)
def _SetData(self, Data):
if len(self) < self._HEADER_SIZE_:
self.extend([0] * (self._HEADER_SIZE_ - len(self)))
else:
del self[self._HEADER_SIZE_:]
self.extend(Data)
def _GetData(self):
if len(self) > self._HEADER_SIZE_:
return self[self._HEADER_SIZE_:]
return None
Data = property(_GetData, _SetData)
## CompressedImage() class
#
# A class for Compressed Image
#
class CompressedImage(Image):
# UncompressedLength = 4-byte
# CompressionType = 1-byte
_HEADER_ = struct.Struct("1I 1B")
_HEADER_SIZE_ = _HEADER_.size
_ORIG_SIZE_ = struct.Struct("1I")
_CMPRS_TYPE_ = struct.Struct("4x 1B")
def __init__(self, CompressedData=None, CompressionType=None, UncompressedLength=None):
Image.__init__(self)
if UncompressedLength is not None:
self.UncompressedLength = UncompressedLength
if CompressionType is not None:
self.CompressionType = CompressionType
if CompressedData is not None:
self.Data = CompressedData
def __str__(self):
global gIndention
S = "algorithm=%s uncompressed=%x" % (self.CompressionType, self.UncompressedLength)
for Sec in self.Sections:
S += '\n' + str(Sec)
return S
def _SetOriginalSize(self, Size):
self.SetField(self._ORIG_SIZE_, 0, Size)
def _GetOriginalSize(self):
return self.GetField(self._ORIG_SIZE_)[0]
def _SetCompressionType(self, Type):
self.SetField(self._CMPRS_TYPE_, 0, Type)
def _GetCompressionType(self):
return self.GetField(self._CMPRS_TYPE_)[0]
def _GetSections(self):
try:
TmpData = DeCompress('Efi', self[self._HEADER_SIZE_:])
DecData = array('B')
DecData.fromstring(TmpData)
except:
TmpData = DeCompress('Framework', self[self._HEADER_SIZE_:])
DecData = array('B')
DecData.fromstring(TmpData)
SectionList = []
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
except:
break
SectionList.append(Sec)
return SectionList
UncompressedLength = property(_GetOriginalSize, _SetOriginalSize)
CompressionType = property(_GetCompressionType, _SetCompressionType)
Sections = property(_GetSections)
## Ui() class
#
# A class for Ui
#
class Ui(Image):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = 0
def __init__(self):
Image.__init__(self)
def __str__(self):
return self.String
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _GetUiString(self):
return codecs.utf_16_decode(self[0:-2].tostring())[0]
String = property(_GetUiString)
## Depex() class
#
# A class for Depex
#
class Depex(Image):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = 0
_GUID_ = struct.Struct("1I2H8B")
_OPCODE_ = struct.Struct("1B")
_OPCODE_STRING_ = {
0x00 : "BEFORE",
0x01 : "AFTER",
0x02 : "PUSH",
0x03 : "AND",
0x04 : "OR",
0x05 : "NOT",
0x06 : "TRUE",
0x07 : "FALSE",
0x08 : "END",
0x09 : "SOR"
}
_NEXT_ = {
        -1 : _OPCODE_, # first one in depex must be an opcode
0x00 : _GUID_, #"BEFORE",
0x01 : _GUID_, #"AFTER",
0x02 : _GUID_, #"PUSH",
0x03 : _OPCODE_, #"AND",
0x04 : _OPCODE_, #"OR",
0x05 : _OPCODE_, #"NOT",
0x06 : _OPCODE_, #"TRUE",
0x07 : _OPCODE_, #"FALSE",
0x08 : None, #"END",
0x09 : _OPCODE_, #"SOR"
}
def __init__(self):
Image.__init__(self)
self._ExprList = []
def __str__(self):
global gIndention
gIndention += 4
Indention = ' ' * gIndention
S = '\n'
for T in self.Expression:
if T in self._OPCODE_STRING_:
S += Indention + self._OPCODE_STRING_[T]
if T not in [0x00, 0x01, 0x02]:
S += '\n'
else:
S += ' ' + gGuidStringFormat % T + '\n'
gIndention -= 4
return S
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _GetExpression(self):
if self._ExprList == []:
Offset = 0
CurrentData = self._OPCODE_
while Offset < len(self):
Token = CurrentData.unpack_from(self, Offset)
Offset += CurrentData.size
if len(Token) == 1:
Token = Token[0]
if Token in self._NEXT_:
CurrentData = self._NEXT_[Token]
else:
CurrentData = self._GUID_
else:
CurrentData = self._OPCODE_
self._ExprList.append(Token)
if CurrentData is None:
break
return self._ExprList
Expression = property(_GetExpression)
## FirmwareVolume() class
#
# A class for Firmware Volume
#
class FirmwareVolume(Image):
# Read FvLength, Attributes, HeaderLength, Checksum
_HEADER_ = struct.Struct("16x 1I2H8B 1Q 4x 1I 1H 1H")
_HEADER_SIZE_ = _HEADER_.size
_FfsGuid = "8C8CE578-8A3D-4F1C-9935-896185C32DD3"
_GUID_ = struct.Struct("16x 1I2H8B")
_LENGTH_ = struct.Struct("16x 16x 1Q")
_SIG_ = struct.Struct("16x 16x 8x 1I")
_ATTR_ = struct.Struct("16x 16x 8x 4x 1I")
_HLEN_ = struct.Struct("16x 16x 8x 4x 4x 1H")
_CHECKSUM_ = struct.Struct("16x 16x 8x 4x 4x 2x 1H")
def __init__(self, Name=''):
Image.__init__(self)
self.Name = Name
self.FfsDict = sdict()
self.OrderedFfsDict = sdict()
self.UnDispatchedFfsDict = sdict()
self.ProtocolList = sdict()
def CheckArchProtocol(self):
for Item in EotGlobalData.gArchProtocolGuids:
if Item.lower() not in EotGlobalData.gProtocolList:
return False
return True
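    ## ParseDepex() method
    #
    # Evaluate a dependency expression against the already-installed Ppi/Protocol
    # lists: PUSH looks a GUID up in the list, AND/OR/NOT operate on a boolean
    # stack, BEFORE/AFTER return immediately with a file-level dependency, and END
    # pops the final result that decides whether the FFS file could be loaded.
    #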
def ParseDepex(self, Depex, Type):
List = None
if Type == 'Ppi':
List = EotGlobalData.gPpiList
if Type == 'Protocol':
List = EotGlobalData.gProtocolList
DepexStack = []
DepexList = []
DepexString = ''
FileDepex = None
CouldBeLoaded = True
for Index in range(0, len(Depex.Expression)):
Item = Depex.Expression[Index]
if Item == 0x00:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid in self.OrderedFfsDict and Depex.Expression[Index + 1] == 0x08:
return (True, 'BEFORE %s' % Guid, [Guid, 'BEFORE'])
elif Item == 0x01:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid in self.OrderedFfsDict and Depex.Expression[Index + 1] == 0x08:
return (True, 'AFTER %s' % Guid, [Guid, 'AFTER'])
elif Item == 0x02:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid.lower() in List:
DepexStack.append(True)
DepexList.append(Guid)
else:
DepexStack.append(False)
DepexList.append(Guid)
continue
elif Item == 0x03 or Item == 0x04:
DepexStack.append(eval(str(DepexStack.pop()) + ' ' + Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexStack.pop())))
DepexList.append(str(DepexList.pop()) + ' ' + Depex._OPCODE_STRING_[Item].upper() + ' ' + str(DepexList.pop()))
elif Item == 0x05:
DepexStack.append(eval(Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexStack.pop())))
DepexList.append(Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexList.pop()))
elif Item == 0x06:
DepexStack.append(True)
DepexList.append('TRUE')
DepexString = DepexString + 'TRUE' + ' '
elif Item == 0x07:
DepexStack.append(False)
DepexList.append('False')
DepexString = DepexString + 'FALSE' + ' '
elif Item == 0x08:
if Index != len(Depex.Expression) - 1:
CouldBeLoaded = False
else:
CouldBeLoaded = DepexStack.pop()
else:
CouldBeLoaded = False
if DepexList != []:
DepexString = DepexList[0].strip()
return (CouldBeLoaded, DepexString, FileDepex)
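    ## Dispatch() method
    #
    # Simulate the firmware dispatch order: SEC, PEI and DXE cores plus the a
    # priori files are handled first, then DisPatchPei()/DisPatchDxe() repeatedly
    # move FFS files whose depex evaluates to TRUE from UnDispatchedFfsDict into
    # OrderedFfsDict.
    #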
def Dispatch(self, Db=None):
if Db is None:
return False
self.UnDispatchedFfsDict = copy.copy(self.FfsDict)
        # Find SecCore, PeiCore, DxeCore, PeiPriori, DxePriori first
FfsSecCoreGuid = None
FfsPeiCoreGuid = None
FfsDxeCoreGuid = None
FfsPeiPrioriGuid = None
FfsDxePrioriGuid = None
for FfsID in self.UnDispatchedFfsDict.keys():
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x03:
FfsSecCoreGuid = FfsID
continue
if Ffs.Type == 0x04:
FfsPeiCoreGuid = FfsID
continue
if Ffs.Type == 0x05:
FfsDxeCoreGuid = FfsID
continue
if Ffs.Guid.lower() == PEI_APRIORI_GUID.lower():
FfsPeiPrioriGuid = FfsID
continue
if Ffs.Guid.lower() == DXE_APRIORI_GUID.lower():
FfsDxePrioriGuid = FfsID
continue
# Parse SEC_CORE first
if FfsSecCoreGuid is not None:
self.OrderedFfsDict[FfsSecCoreGuid] = self.UnDispatchedFfsDict.pop(FfsSecCoreGuid)
self.LoadPpi(Db, FfsSecCoreGuid)
# Parse PEI first
if FfsPeiCoreGuid is not None:
self.OrderedFfsDict[FfsPeiCoreGuid] = self.UnDispatchedFfsDict.pop(FfsPeiCoreGuid)
self.LoadPpi(Db, FfsPeiCoreGuid)
if FfsPeiPrioriGuid is not None:
# Load PEIM described in priori file
FfsPeiPriori = self.UnDispatchedFfsDict.pop(FfsPeiPrioriGuid)
if len(FfsPeiPriori.Sections) == 1:
Section = FfsPeiPriori.Sections.popitem()[1]
if Section.Type == 0x19:
GuidStruct = struct.Struct('1I2H8B')
Start = 4
while len(Section) > Start:
Guid = GuidStruct.unpack_from(Section[Start : Start + 16])
GuidString = gGuidStringFormat % Guid
Start = Start + 16
if GuidString in self.UnDispatchedFfsDict:
self.OrderedFfsDict[GuidString] = self.UnDispatchedFfsDict.pop(GuidString)
self.LoadPpi(Db, GuidString)
self.DisPatchPei(Db)
# Parse DXE then
if FfsDxeCoreGuid is not None:
self.OrderedFfsDict[FfsDxeCoreGuid] = self.UnDispatchedFfsDict.pop(FfsDxeCoreGuid)
self.LoadProtocol(Db, FfsDxeCoreGuid)
if FfsDxePrioriGuid is not None:
                # Load DXE drivers listed in the DXE a priori file
FfsDxePriori = self.UnDispatchedFfsDict.pop(FfsDxePrioriGuid)
if len(FfsDxePriori.Sections) == 1:
Section = FfsDxePriori.Sections.popitem()[1]
if Section.Type == 0x19:
GuidStruct = struct.Struct('1I2H8B')
Start = 4
while len(Section) > Start:
Guid = GuidStruct.unpack_from(Section[Start : Start + 16])
GuidString = gGuidStringFormat % Guid
Start = Start + 16
if GuidString in self.UnDispatchedFfsDict:
self.OrderedFfsDict[GuidString] = self.UnDispatchedFfsDict.pop(GuidString)
self.LoadProtocol(Db, GuidString)
self.DisPatchDxe(Db)
def LoadProtocol(self, Db, ModuleGuid):
SqlCommand = """select GuidValue from Report
where SourceFileFullPath in
(select Value1 from Inf where BelongsToFile =
(select BelongsToFile from Inf
where Value1 = 'FILE_GUID' and Value2 like '%s' and Model = %s)
and Model = %s)
and ItemType = 'Protocol' and ItemMode = 'Produced'""" \
% (ModuleGuid, 5001, 3007)
RecordSet = Db.TblReport.Exec(SqlCommand)
for Record in RecordSet:
SqlCommand = """select Value2 from Inf where BelongsToFile =
(select DISTINCT BelongsToFile from Inf
where Value1 =
(select SourceFileFullPath from Report
where GuidValue like '%s' and ItemMode = 'Callback'))
and Value1 = 'FILE_GUID'""" % Record[0]
CallBackSet = Db.TblReport.Exec(SqlCommand)
if CallBackSet != []:
EotGlobalData.gProtocolList[Record[0].lower()] = ModuleGuid
else:
EotGlobalData.gProtocolList[Record[0].lower()] = ModuleGuid
def LoadPpi(self, Db, ModuleGuid):
SqlCommand = """select GuidValue from Report
where SourceFileFullPath in
(select Value1 from Inf where BelongsToFile =
(select BelongsToFile from Inf
where Value1 = 'FILE_GUID' and Value2 like '%s' and Model = %s)
and Model = %s)
and ItemType = 'Ppi' and ItemMode = 'Produced'""" \
% (ModuleGuid, 5001, 3007)
RecordSet = Db.TblReport.Exec(SqlCommand)
for Record in RecordSet:
EotGlobalData.gPpiList[Record[0].lower()] = ModuleGuid
def DisPatchDxe(self, Db):
IsInstalled = False
ScheduleList = sdict()
for FfsID in self.UnDispatchedFfsDict.keys():
CouldBeLoaded = False
DepexString = ''
FileDepex = None
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x07:
# Get Depex
IsFoundDepex = False
for Section in Ffs.Sections.values():
# Find Depex
if Section.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(Section._SubImages[4], 'Protocol')
break
if Section.Type == 0x01:
CompressSections = Section._SubImages[4]
for CompressSection in CompressSections.Sections:
if CompressSection.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(CompressSection._SubImages[4], 'Protocol')
break
if CompressSection.Type == 0x02:
NewSections = CompressSection._SubImages[4]
for NewSection in NewSections.Sections:
if NewSection.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(NewSection._SubImages[4], 'Protocol')
break
# Not find Depex
if not IsFoundDepex:
CouldBeLoaded = self.CheckArchProtocol()
DepexString = ''
FileDepex = None
# Append New Ffs
if CouldBeLoaded:
IsInstalled = True
NewFfs = self.UnDispatchedFfsDict.pop(FfsID)
NewFfs.Depex = DepexString
if FileDepex is not None:
ScheduleList.insert(FileDepex[1], FfsID, NewFfs, FileDepex[0])
else:
ScheduleList[FfsID] = NewFfs
else:
self.UnDispatchedFfsDict[FfsID].Depex = DepexString
for FfsID in ScheduleList.keys():
NewFfs = ScheduleList.pop(FfsID)
FfsName = 'UnKnown'
self.OrderedFfsDict[FfsID] = NewFfs
self.LoadProtocol(Db, FfsID)
SqlCommand = """select Value2 from Inf
where BelongsToFile = (select BelongsToFile from Inf where Value1 = 'FILE_GUID' and lower(Value2) = lower('%s') and Model = %s)
and Model = %s and Value1='BASE_NAME'""" % (FfsID, 5001, 5001)
RecordSet = Db.TblReport.Exec(SqlCommand)
if RecordSet != []:
FfsName = RecordSet[0][0]
if IsInstalled:
self.DisPatchDxe(Db)
def DisPatchPei(self, Db):
IsInstalled = False
for FfsID in self.UnDispatchedFfsDict.keys():
CouldBeLoaded = True
DepexString = ''
FileDepex = None
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x06 or Ffs.Type == 0x08:
# Get Depex
for Section in Ffs.Sections.values():
if Section.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(Section._SubImages[4], 'Ppi')
break
if Section.Type == 0x01:
CompressSections = Section._SubImages[4]
for CompressSection in CompressSections.Sections:
if CompressSection.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(CompressSection._SubImages[4], 'Ppi')
break
if CompressSection.Type == 0x02:
NewSections = CompressSection._SubImages[4]
for NewSection in NewSections.Sections:
if NewSection.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(NewSection._SubImages[4], 'Ppi')
break
# Append New Ffs
if CouldBeLoaded:
IsInstalled = True
NewFfs = self.UnDispatchedFfsDict.pop(FfsID)
NewFfs.Depex = DepexString
self.OrderedFfsDict[FfsID] = NewFfs
self.LoadPpi(Db, FfsID)
else:
self.UnDispatchedFfsDict[FfsID].Depex = DepexString
if IsInstalled:
self.DisPatchPei(Db)
def __str__(self):
global gIndention
gIndention += 4
FvInfo = '\n' + ' ' * gIndention
FvInfo += "[FV:%s] file_system=%s size=%x checksum=%s\n" % (self.Name, self.FileSystemGuid, self.Size, self.Checksum)
FfsInfo = "\n".join([str(self.FfsDict[FfsId]) for FfsId in self.FfsDict])
gIndention -= 4
return FvInfo + FfsInfo
def _Unpack(self):
Size = self._LENGTH_.unpack_from(self._BUF_, self._OFF_)[0]
self.empty()
self.extend(self._BUF_[self._OFF_:self._OFF_ + Size])
# traverse the FFS
EndOfFv = Size
FfsStartAddress = self.HeaderSize
LastFfsObj = None
while FfsStartAddress < EndOfFv:
FfsObj = Ffs()
FfsObj.frombuffer(self, FfsStartAddress)
FfsId = repr(FfsObj)
if ((self.Attributes & 0x00000800) != 0 and len(FfsObj) == 0xFFFFFF) \
or ((self.Attributes & 0x00000800) == 0 and len(FfsObj) == 0):
if LastFfsObj is not None:
LastFfsObj.FreeSpace = EndOfFv - LastFfsObj._OFF_ - len(LastFfsObj)
else:
if FfsId in self.FfsDict:
EdkLogger.error("FV", 0, "Duplicate GUID in FFS",
ExtraData="\t%s @ %s\n\t%s @ %s" \
% (FfsObj.Guid, FfsObj.Offset,
self.FfsDict[FfsId].Guid, self.FfsDict[FfsId].Offset))
self.FfsDict[FfsId] = FfsObj
if LastFfsObj is not None:
LastFfsObj.FreeSpace = FfsStartAddress - LastFfsObj._OFF_ - len(LastFfsObj)
FfsStartAddress += len(FfsObj)
#
# align to next 8-byte aligned address: A = (A + 8 - 1) & (~(8 - 1))
            # The next FFS must start at the next 8-byte aligned address
#
FfsStartAddress = (FfsStartAddress + 7) & (~7)
LastFfsObj = FfsObj
def _GetAttributes(self):
return self.GetField(self._ATTR_, 0)[0]
def _GetSize(self):
return self.GetField(self._LENGTH_, 0)[0]
def _GetChecksum(self):
return self.GetField(self._CHECKSUM_, 0)[0]
def _GetHeaderLength(self):
return self.GetField(self._HLEN_, 0)[0]
def _GetFileSystemGuid(self):
return gGuidStringFormat % self.GetField(self._GUID_, 0)
Attributes = property(_GetAttributes)
Size = property(_GetSize)
Checksum = property(_GetChecksum)
HeaderSize = property(_GetHeaderLength)
FileSystemGuid = property(_GetFileSystemGuid)
## GuidDefinedImage() class
#
# A class for GUID Defined Image
#
class GuidDefinedImage(Image):
_HEADER_ = struct.Struct("1I2H8B 1H 1H")
_HEADER_SIZE_ = _HEADER_.size
_GUID_ = struct.Struct("1I2H8B")
_DATA_OFFSET_ = struct.Struct("16x 1H")
_ATTR_ = struct.Struct("18x 1H")
CRC32_GUID = "FC1BCDB0-7D31-49AA-936A-A4600D9DD083"
TIANO_COMPRESS_GUID = 'A31280AD-481E-41B6-95E8-127F4C984779'
LZMA_COMPRESS_GUID = 'EE4E5898-3914-4259-9D6E-DC7BD79403CF'
def __init__(self, SectionDefinitionGuid=None, DataOffset=None, Attributes=None, Data=None):
Image.__init__(self)
if SectionDefinitionGuid is not None:
self.SectionDefinitionGuid = SectionDefinitionGuid
if DataOffset is not None:
self.DataOffset = DataOffset
if Attributes is not None:
self.Attributes = Attributes
if Data is not None:
self.Data = Data
def __str__(self):
S = "guid=%s" % (gGuidStringFormat % self.SectionDefinitionGuid)
for Sec in self.Sections:
S += "\n" + str(Sec)
return S
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _SetAttribute(self, Attribute):
self.SetField(self._ATTR_, 0, Attribute)
def _GetAttribute(self):
return self.GetField(self._ATTR_)[0]
def _SetGuid(self, Guid):
self.SetField(self._GUID_, 0, Guid)
def _GetGuid(self):
return self.GetField(self._GUID_)
def _SetDataOffset(self, Offset):
self.SetField(self._DATA_OFFSET_, 0, Offset)
def _GetDataOffset(self):
return self.GetField(self._DATA_OFFSET_)[0]
def _GetSections(self):
SectionList = []
Guid = gGuidStringFormat % self.SectionDefinitionGuid
if Guid == self.CRC32_GUID:
# skip the CRC32 value, we don't do CRC32 verification here
Offset = self.DataOffset - 4
while Offset < len(self):
Sec = Section()
try:
Sec.frombuffer(self, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
elif Guid == self.TIANO_COMPRESS_GUID:
try:
# skip the header
Offset = self.DataOffset - 4
TmpData = DeCompress('Framework', self[self.Offset:])
DecData = array('B')
DecData.fromstring(TmpData)
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
except:
pass
elif Guid == self.LZMA_COMPRESS_GUID:
try:
# skip the header
Offset = self.DataOffset - 4
TmpData = DeCompress('Lzma', self[self.Offset:])
DecData = array('B')
DecData.fromstring(TmpData)
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
except:
pass
return SectionList
Attributes = property(_GetAttribute, _SetAttribute)
SectionDefinitionGuid = property(_GetGuid, _SetGuid)
DataOffset = property(_GetDataOffset, _SetDataOffset)
Sections = property(_GetSections)
## Section() class
#
# A class for Section
#
class Section(Image):
_TypeName = {
0x00 : "<unknown>",
0x01 : "COMPRESSION",
0x02 : "GUID_DEFINED",
0x10 : "PE32",
0x11 : "PIC",
0x12 : "TE",
0x13 : "DXE_DEPEX",
0x14 : "VERSION",
0x15 : "USER_INTERFACE",
0x16 : "COMPATIBILITY16",
0x17 : "FIRMWARE_VOLUME_IMAGE",
0x18 : "FREEFORM_SUBTYPE_GUID",
0x19 : "RAW",
0x1B : "PEI_DEPEX"
}
_SectionSubImages = {
0x01 : CompressedImage,
0x02 : GuidDefinedImage,
0x17 : FirmwareVolume,
0x13 : Depex,
0x1B : Depex,
0x15 : Ui
}
# Size = 3-byte
# Type = 1-byte
_HEADER_ = struct.Struct("3B 1B")
_HEADER_SIZE_ = _HEADER_.size
# SubTypeGuid
# _FREE_FORM_SUBTYPE_GUID_HEADER_ = struct.Struct("1I2H8B")
_SIZE_ = struct.Struct("3B")
_TYPE_ = struct.Struct("3x 1B")
def __init__(self, Type=None, Size=None):
Image.__init__(self)
self._Alignment = 1
if Type is not None:
self.Type = Type
if Size is not None:
self.Size = Size
def __str__(self):
global gIndention
gIndention += 4
SectionInfo = ' ' * gIndention
if self.Type in self._TypeName:
SectionInfo += "[SECTION:%s] offset=%x size=%x" % (self._TypeName[self.Type], self._OFF_, self.Size)
else:
SectionInfo += "[SECTION:%x<unknown>] offset=%x size=%x " % (self.Type, self._OFF_, self.Size)
for Offset in self._SubImages.keys():
SectionInfo += ", " + str(self._SubImages[Offset])
gIndention -= 4
return SectionInfo
def _Unpack(self):
self.empty()
Type, = self._TYPE_.unpack_from(self._BUF_, self._OFF_)
Size1, Size2, Size3 = self._SIZE_.unpack_from(self._BUF_, self._OFF_)
Size = Size1 + (Size2 << 8) + (Size3 << 16)
if Type not in self._SectionSubImages:
# no need to extract sub-image, keep all in this Image object
self.extend(self._BUF_[self._OFF_ : self._OFF_ + Size])
else:
# keep header in this Image object
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._HEADER_SIZE_])
#
# use new Image object to represent payload, which may be another kind
# of image such as PE32
#
PayloadOffset = self._HEADER_SIZE_
PayloadLen = self.Size - self._HEADER_SIZE_
Payload = self._SectionSubImages[self.Type]()
Payload.frombuffer(self._BUF_, self._OFF_ + self._HEADER_SIZE_, PayloadLen)
self._SubImages[PayloadOffset] = Payload
return Size
def _SetSize(self, Size):
Size1 = Size & 0xFF
Size2 = (Size & 0xFF00) >> 8
Size3 = (Size & 0xFF0000) >> 16
self.SetField(self._SIZE_, 0, Size1, Size2, Size3)
def _GetSize(self):
Size1, Size2, Size3 = self.GetField(self._SIZE_)
return Size1 + (Size2 << 8) + (Size3 << 16)
def _SetType(self, Type):
self.SetField(self._TYPE_, 0, Type)
def _GetType(self):
return self.GetField(self._TYPE_)[0]
def _GetAlignment(self):
return self._Alignment
def _SetAlignment(self, Alignment):
self._Alignment = Alignment
AlignmentMask = Alignment - 1
# section alignment is actually for payload, so we need to add header size
PayloadOffset = self._OFF_ + self._HEADER_SIZE_
if (PayloadOffset & (~AlignmentMask)) == 0:
return
NewOffset = (PayloadOffset + AlignmentMask) & (~AlignmentMask)
while (NewOffset - PayloadOffset) < self._HEADER_SIZE_:
NewOffset += self._Alignment
def tofile(self, f):
self.Size = len(self)
Image.tofile(self, f)
for Offset in self._SubImages:
self._SubImages[Offset].tofile(f)
Type = property(_GetType, _SetType)
Size = property(_GetSize, _SetSize)
Alignment = property(_GetAlignment, _SetAlignment)
## Ffs() class
#
# A class for Ffs Section
#
class Ffs(Image):
_FfsFormat = "24B%(payload_size)sB"
# skip IntegrityCheck
_HEADER_ = struct.Struct("1I2H8B 2x 1B 1B 3B 1B")
_HEADER_SIZE_ = _HEADER_.size
_NAME_ = struct.Struct("1I2H8B")
_INT_CHECK_ = struct.Struct("16x 1H")
_TYPE_ = struct.Struct("18x 1B")
_ATTR_ = struct.Struct("19x 1B")
_SIZE_ = struct.Struct("20x 3B")
_STATE_ = struct.Struct("23x 1B")
VTF_GUID = "1BA0062E-C779-4582-8566-336AE8F78F09"
FFS_ATTRIB_FIXED = 0x04
FFS_ATTRIB_DATA_ALIGNMENT = 0x38
FFS_ATTRIB_CHECKSUM = 0x40
_TypeName = {
0x00 : "<unknown>",
0x01 : "RAW",
0x02 : "FREEFORM",
0x03 : "SECURITY_CORE",
0x04 : "PEI_CORE",
0x05 : "DXE_CORE",
0x06 : "PEIM",
0x07 : "DRIVER",
0x08 : "COMBINED_PEIM_DRIVER",
0x09 : "APPLICATION",
0x0A : "SMM",
0x0B : "FIRMWARE_VOLUME_IMAGE",
0x0C : "COMBINED_SMM_DXE",
0x0D : "SMM_CORE",
0x0E : "MM_STANDALONE",
0x0F : "MM_CORE_STANDALONE",
0xc0 : "OEM_MIN",
0xdf : "OEM_MAX",
0xe0 : "DEBUG_MIN",
0xef : "DEBUG_MAX",
0xf0 : "FFS_MIN",
0xff : "FFS_MAX",
0xf0 : "FFS_PAD",
}
def __init__(self):
Image.__init__(self)
self.FreeSpace = 0
self.Sections = sdict()
self.Depex = ''
self.__ID__ = None
def __str__(self):
global gIndention
gIndention += 4
Indention = ' ' * gIndention
FfsInfo = Indention
FfsInfo += "[FFS:%s] offset=%x size=%x guid=%s free_space=%x alignment=%s\n" % \
(Ffs._TypeName[self.Type], self._OFF_, self.Size, self.Guid, self.FreeSpace, self.Alignment)
SectionInfo = '\n'.join([str(self.Sections[Offset]) for Offset in self.Sections.keys()])
gIndention -= 4
return FfsInfo + SectionInfo + "\n"
def __len__(self):
return self.Size
def __repr__(self):
return self.__ID__
def _Unpack(self):
Size1, Size2, Size3 = self._SIZE_.unpack_from(self._BUF_, self._OFF_)
Size = Size1 + (Size2 << 8) + (Size3 << 16)
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + Size])
# Pad FFS may use the same GUID. We need to avoid it.
if self.Type == 0xf0:
self.__ID__ = str(uuid.uuid1()).upper()
else:
self.__ID__ = self.Guid
# Traverse the SECTION. RAW and PAD do not have sections
if self.Type not in [0xf0, 0x01] and Size > 0 and Size < 0xFFFFFF:
EndOfFfs = Size
SectionStartAddress = self._HEADER_SIZE_
while SectionStartAddress < EndOfFfs:
SectionObj = Section()
SectionObj.frombuffer(self, SectionStartAddress)
#f = open(repr(SectionObj), 'wb')
#SectionObj.Size = 0
#SectionObj.tofile(f)
#f.close()
self.Sections[SectionStartAddress] = SectionObj
SectionStartAddress += len(SectionObj)
SectionStartAddress = (SectionStartAddress + 3) & (~3)
def Pack(self):
pass
def SetFreeSpace(self, Size):
self.FreeSpace = Size
def _GetGuid(self):
return gGuidStringFormat % self.Name
def _SetName(self, Value):
# Guid1, Guid2, Guid3, Guid4, Guid5, Guid6, Guid7, Guid8, Guid9, Guid10, Guid11
self.SetField(self._NAME_, 0, Value)
def _GetName(self):
# Guid1, Guid2, Guid3, Guid4, Guid5, Guid6, Guid7, Guid8, Guid9, Guid10, Guid11
return self.GetField(self._NAME_)
def _SetSize(self, Size):
Size1 = Size & 0xFF
Size2 = (Size & 0xFF00) >> 8
Size3 = (Size & 0xFF0000) >> 16
self.SetField(self._SIZE_, 0, Size1, Size2, Size3)
def _GetSize(self):
Size1, Size2, Size3 = self.GetField(self._SIZE_)
return Size1 + (Size2 << 8) + (Size3 << 16)
def _SetType(self, Type):
self.SetField(self._TYPE_, 0, Type)
def _GetType(self):
return self.GetField(self._TYPE_)[0]
def _SetAttributes(self, Value):
self.SetField(self._ATTR_, 0, Value)
def _GetAttributes(self):
return self.GetField(self._ATTR_)[0]
def _GetFixed(self):
if (self.Attributes & self.FFS_ATTRIB_FIXED) != 0:
return True
return False
def _GetCheckSum(self):
if (self.Attributes & self.FFS_ATTRIB_CHECKSUM) != 0:
return True
return False
def _GetAlignment(self):
return (self.Attributes & self.FFS_ATTRIB_DATA_ALIGNMENT) >> 3
def _SetState(self, Value):
self.SetField(self._STATE_, 0, Value)
def _GetState(self):
return self.GetField(self._STATE_)[0]
Name = property(_GetName, _SetName)
Guid = property(_GetGuid)
Type = property(_GetType, _SetType)
Size = property(_GetSize, _SetSize)
Attributes = property(_GetAttributes, _SetAttributes)
Fixed = property(_GetFixed)
Checksum = property(_GetCheckSum)
Alignment = property(_GetAlignment)
State = property(_GetState, _SetState)
## MultipleFv() class
#
# A class for Multiple FV
#
class MultipleFv(FirmwareVolume):
def __init__(self, FvList):
FirmwareVolume.__init__(self)
self.BasicInfo = []
for FvPath in FvList:
Fd = None
FvName = os.path.splitext(os.path.split(FvPath)[1])[0]
if FvPath.strip():
Fd = open(FvPath, 'rb')
Buf = array('B')
try:
Buf.fromfile(Fd, os.path.getsize(FvPath))
except EOFError:
pass
Fv = FirmwareVolume(FvName)
Fv.frombuffer(Buf, 0, len(Buf))
self.BasicInfo.append([Fv.Name, Fv.FileSystemGuid, Fv.Size])
self.FfsDict.append(Fv.FfsDict)
## Class Eot
#
# This class is used to define Eot main entrance
#
# @param object: Inherited from object class
#
class Eot(object):
## The constructor
#
# @param self: The object pointer
#
def __init__(self, CommandLineOption=True, IsInit=True, SourceFileList=None, \
IncludeDirList=None, DecFileList=None, GuidList=None, LogFile=None,
FvFileList="", MapFileList="", Report='Report.html', Dispatch=None):
# Version and Copyright
self.VersionNumber = ("0.02" + " " + gBUILD_VERSION)
self.Version = "%prog Version " + self.VersionNumber
self.Copyright = "Copyright (c) 2008 - 2018, Intel Corporation All rights reserved."
self.Report = Report
self.IsInit = IsInit
self.SourceFileList = SourceFileList
self.IncludeDirList = IncludeDirList
self.DecFileList = DecFileList
self.GuidList = GuidList
self.LogFile = LogFile
self.FvFileList = FvFileList
self.MapFileList = MapFileList
self.Dispatch = Dispatch
# Check workspace environment
if "EFI_SOURCE" not in os.environ:
if "EDK_SOURCE" not in os.environ:
pass
else:
EotGlobalData.gEDK_SOURCE = os.path.normpath(os.getenv("EDK_SOURCE"))
else:
EotGlobalData.gEFI_SOURCE = os.path.normpath(os.getenv("EFI_SOURCE"))
EotGlobalData.gEDK_SOURCE = os.path.join(EotGlobalData.gEFI_SOURCE, 'Edk')
if "WORKSPACE" not in os.environ:
EdkLogger.error("EOT", BuildToolError.ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
else:
EotGlobalData.gWORKSPACE = os.path.normpath(os.getenv("WORKSPACE"))
EotGlobalData.gMACRO['WORKSPACE'] = EotGlobalData.gWORKSPACE
EotGlobalData.gMACRO['EFI_SOURCE'] = EotGlobalData.gEFI_SOURCE
EotGlobalData.gMACRO['EDK_SOURCE'] = EotGlobalData.gEDK_SOURCE
# Parse the options and args
if CommandLineOption:
self.ParseOption()
if self.FvFileList:
for FvFile in GetSplitValueList(self.FvFileList, ' '):
FvFile = os.path.normpath(FvFile)
if not os.path.isfile(FvFile):
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "Can not find file %s " % FvFile)
EotGlobalData.gFV_FILE.append(FvFile)
else:
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "The fv file list of target platform was not specified")
if self.MapFileList:
for MapFile in GetSplitValueList(self.MapFileList, ' '):
MapFile = os.path.normpath(MapFile)
if not os.path.isfile(MapFile):
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "Can not find file %s " % MapFile)
EotGlobalData.gMAP_FILE.append(MapFile)
# Generate source file list
self.GenerateSourceFileList(self.SourceFileList, self.IncludeDirList)
# Generate guid list of dec file list
self.ParseDecFile(self.DecFileList)
# Generate guid list from GUID list file
self.ParseGuidList(self.GuidList)
# Init Eot database
EotGlobalData.gDb = Database.Database(Database.DATABASE_PATH)
EotGlobalData.gDb.InitDatabase(self.IsInit)
# Build ECC database
self.BuildDatabase()
# Parse Ppi/Protocol
self.ParseExecutionOrder()
# Merge Identifier tables
self.GenerateQueryTable()
# Generate report database
self.GenerateReportDatabase()
# Load Fv Info
self.LoadFvInfo()
# Load Map Info
self.LoadMapInfo()
# Generate Report
self.GenerateReport()
# Convert log file
self.ConvertLogFile(self.LogFile)
# DONE
EdkLogger.quiet("EOT FINISHED!")
# Close Database
EotGlobalData.gDb.Close()
## ParseDecFile() method
#
# parse DEC file and get all GUID names with GUID values as {GuidName : GuidValue}
# The Dict is stored in EotGlobalData.gGuidDict
#
# @param self: The object pointer
# @param DecFileList: A list of all DEC files
#
def ParseDecFile(self, DecFileList):
if DecFileList:
path = os.path.normpath(DecFileList)
lfr = open(path, 'rb')
for line in lfr:
path = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
if os.path.exists(path):
dfr = open(path, 'rb')
for line in dfr:
line = CleanString(line)
list = line.split('=')
if len(list) == 2:
EotGlobalData.gGuidDict[list[0].strip()] = GuidStructureStringToGuidString(list[1].strip())
## ParseGuidList() method
#
# Parse Guid list and get all GUID names with GUID values as {GuidName : GuidValue}
# The Dict is stored in EotGlobalData.gGuidDict
#
# @param self: The object pointer
# @param GuidList: A list of all GUID and its value
#
def ParseGuidList(self, GuidList):
Path = os.path.join(EotGlobalData.gWORKSPACE, GuidList)
if os.path.isfile(Path):
for Line in open(Path):
if Line.strip():
(GuidName, GuidValue) = Line.split()
EotGlobalData.gGuidDict[GuidName] = GuidValue
## ConvertLogFile() method
#
# Parse a real running log file to get real dispatch order
# The result is saved to old file name + '.new'
#
# @param self: The object pointer
# @param LogFile: A real running log file name
#
def ConvertLogFile(self, LogFile):
newline = []
lfr = None
lfw = None
if LogFile:
lfr = open(LogFile, 'rb')
lfw = open(LogFile + '.new', 'wb')
for line in lfr:
line = line.strip()
line = line.replace('.efi', '')
index = line.find("Loading PEIM at ")
if index > -1:
newline.append(line[index + 55 : ])
continue
index = line.find("Loading driver at ")
if index > -1:
newline.append(line[index + 57 : ])
continue
for line in newline:
lfw.write(line + '\r\n')
if lfr:
lfr.close()
if lfw:
lfw.close()
## GenerateSourceFileList() method
#
# Generate a list of all source files
# 1. Search the file list one by one
# 2. Store inf file name with source file names under it like
# { INF file name: [source file1, source file2, ...]}
# 3. Search the include list to find all .h files
# 4. Store source file list to EotGlobalData.gSOURCE_FILES
# 5. Store INF file list to EotGlobalData.gINF_FILES
#
# @param self: The object pointer
# @param SourceFileList: A list of all source files
# @param IncludeFileList: A list of all include files
#
def GenerateSourceFileList(self, SourceFileList, IncludeFileList):
EdkLogger.quiet("Generating source files list ... ")
mSourceFileList = []
mInfFileList = []
mDecFileList = []
mFileList = {}
mCurrentInfFile = ''
mCurrentSourceFileList = []
if SourceFileList:
sfl = open(SourceFileList, 'r')
for line in sfl:
line = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
if line[-2:].upper() == '.C' or line[-2:].upper() == '.H':
if line not in mCurrentSourceFileList:
mCurrentSourceFileList.append(line)
mSourceFileList.append(line)
EotGlobalData.gOP_SOURCE_FILES.write('%s\n' % line)
if line[-4:].upper() == '.INF':
if mCurrentInfFile != '':
mFileList[mCurrentInfFile] = mCurrentSourceFileList
mCurrentSourceFileList = []
mCurrentInfFile = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line))
EotGlobalData.gOP_INF.write('%s\n' % mCurrentInfFile)
if mCurrentInfFile not in mFileList:
mFileList[mCurrentInfFile] = mCurrentSourceFileList
# Get all include files from packages
if IncludeFileList:
ifl = open(IncludeFileList, 'rb')
for line in ifl:
if not line.strip():
continue
newline = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
for Root, Dirs, Files in os.walk(str(newline)):
for File in Files:
FullPath = os.path.normpath(os.path.join(Root, File))
if FullPath not in mSourceFileList and File[-2:].upper() == '.H':
mSourceFileList.append(FullPath)
EotGlobalData.gOP_SOURCE_FILES.write('%s\n' % FullPath)
if FullPath not in mDecFileList and File.upper().find('.DEC') > -1:
mDecFileList.append(FullPath)
EotGlobalData.gSOURCE_FILES = mSourceFileList
EotGlobalData.gOP_SOURCE_FILES.close()
EotGlobalData.gINF_FILES = mFileList
EotGlobalData.gOP_INF.close()
## GenerateReport() method
#
# Generate final HTML report
#
# @param self: The object pointer
#
def GenerateReport(self):
EdkLogger.quiet("Generating report file ... ")
Rep = Report(self.Report, EotGlobalData.gFV, self.Dispatch)
Rep.GenerateReport()
## LoadMapInfo() method
#
# Load map files and parse them
#
# @param self: The object pointer
#
def LoadMapInfo(self):
if EotGlobalData.gMAP_FILE != []:
EdkLogger.quiet("Parsing Map file ... ")
EotGlobalData.gMap = ParseMapFile(EotGlobalData.gMAP_FILE)
## LoadFvInfo() method
#
# Load FV binary files and parse them
#
# @param self: The object pointer
#
def LoadFvInfo(self):
EdkLogger.quiet("Parsing FV file ... ")
EotGlobalData.gFV = MultipleFv(EotGlobalData.gFV_FILE)
EotGlobalData.gFV.Dispatch(EotGlobalData.gDb)
for Protocol in EotGlobalData.gProtocolList:
EotGlobalData.gOP_UN_MATCHED_IN_LIBRARY_CALLING.write('%s\n' %Protocol)
## GenerateReportDatabase() method
#
# Generate data for the information needed by report
# 1. Update name, macro and value of all found PPI/PROTOCOL GUID
# 2. Install hard coded PPI/PROTOCOL
#
# @param self: The object pointer
#
def GenerateReportDatabase(self):
EdkLogger.quiet("Generating the cross-reference table of GUID for Ppi/Protocol ... ")
# Update Protocol/Ppi Guid
SqlCommand = """select DISTINCT GuidName from Report"""
RecordSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
for Record in RecordSet:
GuidName = Record[0]
GuidMacro = ''
GuidMacro2 = ''
GuidValue = ''
# Find guid value defined in Dec file
if GuidName in EotGlobalData.gGuidDict:
GuidValue = EotGlobalData.gGuidDict[GuidName]
SqlCommand = """update Report set GuidMacro = '%s', GuidValue = '%s' where GuidName = '%s'""" %(GuidMacro, GuidValue, GuidName)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
continue
# Search defined Macros for guid name
SqlCommand ="""select DISTINCT Value, Modifier from Query where Name like '%s'""" % GuidName
GuidMacroSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
# Ignore NULL result
if not GuidMacroSet:
continue
GuidMacro = GuidMacroSet[0][0].strip()
if not GuidMacro:
continue
# Find Guid value of Guid Macro
SqlCommand ="""select DISTINCT Value from Query2 where Value like '%%%s%%' and Model = %s""" % (GuidMacro, MODEL_IDENTIFIER_MACRO_DEFINE)
GuidValueSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
if GuidValueSet != []:
GuidValue = GuidValueSet[0][0]
GuidValue = GuidValue[GuidValue.find(GuidMacro) + len(GuidMacro) :]
GuidValue = GuidValue.lower().replace('\\', '').replace('\r', '').replace('\n', '').replace('l', '').strip()
GuidValue = GuidStructureStringToGuidString(GuidValue)
SqlCommand = """update Report set GuidMacro = '%s', GuidValue = '%s' where GuidName = '%s'""" %(GuidMacro, GuidValue, GuidName)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
continue
# Update Hard Coded Ppi/Protocol
SqlCommand = """select DISTINCT GuidValue, ItemType from Report where ModuleID = -2 and ItemMode = 'Produced'"""
RecordSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
for Record in RecordSet:
if Record[1] == 'Ppi':
EotGlobalData.gPpiList[Record[0].lower()] = -2
if Record[1] == 'Protocol':
EotGlobalData.gProtocolList[Record[0].lower()] = -2
## GenerateQueryTable() method
#
    # Generate two tables to improve query performance
#
# @param self: The object pointer
#
def GenerateQueryTable(self):
EdkLogger.quiet("Generating temp query table for analysis ... ")
for Identifier in EotGlobalData.gIdentifierTableList:
SqlCommand = """insert into Query (Name, Modifier, Value, Model)
select Name, Modifier, Value, Model from %s where (Model = %s or Model = %s)""" \
% (Identifier[0], MODEL_IDENTIFIER_VARIABLE, MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
SqlCommand = """insert into Query2 (Name, Modifier, Value, Model)
select Name, Modifier, Value, Model from %s where Model = %s""" \
% (Identifier[0], MODEL_IDENTIFIER_MACRO_DEFINE)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
## ParseExecutionOrder() method
#
# Get final execution order
# 1. Search all PPI
# 2. Search all PROTOCOL
#
# @param self: The object pointer
#
def ParseExecutionOrder(self):
EdkLogger.quiet("Searching Ppi/Protocol ... ")
for Identifier in EotGlobalData.gIdentifierTableList:
ModuleID, ModuleName, ModuleGuid, SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, Enabled = \
-1, '', '', -1, '', '', '', '', '', '', '', '', 0
SourceFileID = Identifier[0].replace('Identifier', '')
SourceFileFullPath = Identifier[1]
Identifier = Identifier[0]
# Find Ppis
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallPpi', '->InstallPpi', 'PeiInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.ReInstallPpi', '->ReInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 2)
SearchPpiCallFunction(Identifier, SourceFileID, SourceFileFullPath, ItemMode)
ItemMode = 'Consumed'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.LocatePpi', '->LocatePpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Ppi', ItemMode)
ItemMode = 'Callback'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.NotifyPpi', '->NotifyPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
            # Find Protocols
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallProtocolInterface', '.ReInstallProtocolInterface', '->InstallProtocolInterface', '->ReInstallProtocolInterface', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 1)
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallMultipleProtocolInterfaces', '->InstallMultipleProtocolInterfaces', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 2)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
ItemMode = 'Consumed'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.LocateProtocol', '->LocateProtocol', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 0)
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.HandleProtocol', '->HandleProtocol', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 1)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
ItemMode = 'Callback'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.RegisterProtocolNotify', '->RegisterProtocolNotify', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 0)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
# Hard Code
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gEfiSecPlatformInformationPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gEfiNtLoadAsDllPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gNtPeiLoadFileGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiNtAutoScanPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gNtFwhPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiNtThunkPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiPlatformTypePpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiFrequencySelectionCpuPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiCachePpiGuid', '', '', '', 0)
EotGlobalData.gDb.Conn.commit()
    ## BuildDatabase() method
#
# Build the database for target
#
# @param self: The object pointer
#
def BuildDatabase(self):
# Clean report table
EotGlobalData.gDb.TblReport.Drop()
EotGlobalData.gDb.TblReport.Create()
# Build database
if self.IsInit:
self.BuildMetaDataFileDatabase(EotGlobalData.gINF_FILES)
EdkLogger.quiet("Building database for source code ...")
c.CreateCCodeDB(EotGlobalData.gSOURCE_FILES)
EdkLogger.quiet("Building database for source code done!")
EotGlobalData.gIdentifierTableList = GetTableList((MODEL_FILE_C, MODEL_FILE_H), 'Identifier', EotGlobalData.gDb)
## BuildMetaDataFileDatabase() method
#
# Build the database for meta data files
#
# @param self: The object pointer
# @param Inf_Files: A list for all INF files
#
def BuildMetaDataFileDatabase(self, Inf_Files):
EdkLogger.quiet("Building database for meta data files ...")
for InfFile in Inf_Files:
if not InfFile:
continue
EdkLogger.quiet("Parsing %s ..." % str(InfFile))
EdkInfParser(InfFile, EotGlobalData.gDb, Inf_Files[InfFile], '')
EotGlobalData.gDb.Conn.commit()
EdkLogger.quiet("Building database for meta data files done!")
## ParseOption() method
#
# Parse command line options
#
# @param self: The object pointer
#
def ParseOption(self):
(Options, Target) = self.EotOptionParser()
# Set log level
self.SetLogLevel(Options)
if Options.FvFileList:
self.FvFileList = Options.FvFileList
if Options.MapFileList:
            self.MapFileList = Options.MapFileList
if Options.SourceFileList:
self.SourceFileList = Options.SourceFileList
if Options.IncludeDirList:
self.IncludeDirList = Options.IncludeDirList
if Options.DecFileList:
self.DecFileList = Options.DecFileList
if Options.GuidList:
self.GuidList = Options.GuidList
if Options.LogFile:
self.LogFile = Options.LogFile
if Options.keepdatabase:
self.IsInit = False
## SetLogLevel() method
#
# Set current log level of the tool based on args
#
# @param self: The object pointer
# @param Option: The option list including log level setting
#
def SetLogLevel(self, Option):
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
## EotOptionParser() method
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @param self: The object pointer
#
    # @retval Opt An optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def EotOptionParser(self):
Parser = OptionParser(description = self.Copyright, version = self.Version, prog = "Eot.exe", usage = "%prog [options]")
Parser.add_option("-m", "--makefile filename", action="store", type="string", dest='MakeFile',
help="Specify a makefile for the platform.")
Parser.add_option("-c", "--dsc filename", action="store", type="string", dest="DscFile",
help="Specify a dsc file for the platform.")
Parser.add_option("-f", "--fv filename", action="store", type="string", dest="FvFileList",
help="Specify fv file list, quoted by \"\".")
Parser.add_option("-a", "--map filename", action="store", type="string", dest="MapFileList",
help="Specify map file list, quoted by \"\".")
Parser.add_option("-s", "--source files", action="store", type="string", dest="SourceFileList",
help="Specify source file list by a file")
Parser.add_option("-i", "--include dirs", action="store", type="string", dest="IncludeDirList",
help="Specify include dir list by a file")
Parser.add_option("-e", "--dec files", action="store", type="string", dest="DecFileList",
help="Specify dec file list by a file")
Parser.add_option("-g", "--guid list", action="store", type="string", dest="GuidList",
help="Specify guid file list by a file")
Parser.add_option("-l", "--log filename", action="store", type="string", dest="LogFile",
help="Specify real execution log file")
Parser.add_option("-k", "--keepdatabase", action="store_true", type=None, help="The existing Eot database will not be cleaned except report information if this option is specified.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
(Opt, Args)=Parser.parse_args()
return (Opt, Args)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
# Initialize log system
EdkLogger.Initialize()
EdkLogger.IsRaiseError = False
EdkLogger.quiet(time.strftime("%H:%M:%S, %b.%d %Y ", time.localtime()) + "[00:00]" + "\n")
StartTime = time.clock()
Eot = Eot(CommandLineOption=False,
SourceFileList=r'C:\TestEot\Source.txt',
GuidList=r'C:\TestEot\Guid.txt',
FvFileList=r'C:\TestEot\FVRECOVERY.Fv')
FinishTime = time.clock()
BuildDuration = time.strftime("%M:%S", time.gmtime(int(round(FinishTime - StartTime))))
EdkLogger.quiet("\n%s [%s]" % (time.strftime("%H:%M:%S, %b.%d %Y", time.localtime()), BuildDuration))
|
bsd-2-clause
| -5,653,460,506,086,089,000
| 38.241279
| 199
| 0.54166
| false
| 3.864385
| false
| false
| false
|
nilouco/dpAutoRigSystem
|
dpAutoRigSystem/Modules/dpWheel.py
|
1
|
35159
|
# Thanks to Andrew Christophersen
# Maya Wheel Rig with World Vectors video tutorial
# https://youtu.be/QpDc93br3dM
# importing libraries:
import maya.cmds as cmds
from Library import dpUtils as utils
import dpBaseClass as Base
import dpLayoutClass as Layout
# global variables to this module:
CLASS_NAME = "Wheel"
TITLE = "m156_wheel"
DESCRIPTION = "m157_wheelDesc"
ICON = "/Icons/dp_wheel.png"
class Wheel(Base.StartClass, Layout.LayoutClass):
def __init__(self, *args, **kwargs):
#Add the needed parameter to the kwargs dict to be able to maintain the parameter order
kwargs["CLASS_NAME"] = CLASS_NAME
kwargs["TITLE"] = TITLE
kwargs["DESCRIPTION"] = DESCRIPTION
kwargs["ICON"] = ICON
Base.StartClass.__init__(self, *args, **kwargs)
def createModuleLayout(self, *args):
Base.StartClass.createModuleLayout(self)
Layout.LayoutClass.basicModuleLayout(self)
def createGuide(self, *args):
Base.StartClass.createGuide(self)
# Custom GUIDE:
cmds.addAttr(self.moduleGrp, longName="flip", attributeType='bool')
cmds.addAttr(self.moduleGrp, longName="geo", dataType='string')
cmds.addAttr(self.moduleGrp, longName="startFrame", attributeType='long', defaultValue=1)
cmds.addAttr(self.moduleGrp, longName="showControls", attributeType='bool')
cmds.addAttr(self.moduleGrp, longName="steering", attributeType='bool')
cmds.setAttr(self.moduleGrp+".flip", 0)
cmds.setAttr(self.moduleGrp+".showControls", 1)
cmds.setAttr(self.moduleGrp+".steering", 0)
cmds.setAttr(self.moduleGrp+".moduleNamespace", self.moduleGrp[:self.moduleGrp.rfind(":")], type='string')
self.cvCenterLoc = self.ctrls.cvJointLoc(ctrlName=self.guideName+"_CenterLoc", r=0.6, d=1, rot=(90, 0, 90), guide=True)
self.jGuideCenter = cmds.joint(name=self.guideName+"_JGuideCenter", radius=0.001)
cmds.setAttr(self.jGuideCenter+".template", 1)
cmds.parent(self.jGuideCenter, self.moduleGrp, relative=True)
self.cvFrontLoc = self.ctrls.cvControl("id_059_AimLoc", ctrlName=self.guideName+"_FrontLoc", r=0.3, d=1, rot=(0, 0, 90))
self.ctrls.colorShape([self.cvFrontLoc], "blue")
shapeSizeCH = self.ctrls.shapeSizeSetup(self.cvFrontLoc)
cmds.parent(self.cvFrontLoc, self.cvCenterLoc)
cmds.setAttr(self.cvFrontLoc+".tx", 1.3)
self.jGuideFront = cmds.joint(name=self.guideName+"_JGuideFront", radius=0.001)
cmds.setAttr(self.jGuideFront+".template", 1)
cmds.transformLimits(self.cvFrontLoc, translationX=(1, 1), enableTranslationX=(True, False))
radiusCtrl = self.moduleGrp+"_RadiusCtrl"
cvFrontLocPosNode = cmds.createNode("plusMinusAverage", name=self.cvFrontLoc+"_Pos_PMA")
cmds.setAttr(cvFrontLocPosNode+".input1D[0]", -0.5)
cmds.connectAttr(radiusCtrl+".translateX", cvFrontLocPosNode+".input1D[1]")
cmds.connectAttr(cvFrontLocPosNode+".output1D", self.cvFrontLoc+".tx")
self.ctrls.setLockHide([self.cvCenterLoc, self.cvFrontLoc], ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])
self.cvInsideLoc = self.ctrls.cvLocator(ctrlName=self.guideName+"_InsideLoc", r=0.2, d=1, guide=True)
cmds.parent(self.cvInsideLoc, self.cvCenterLoc)
cmds.setAttr(self.cvInsideLoc+".tz", 0.3)
self.jGuideInside = cmds.joint(name=self.guideName+"_JGuideInside", radius=0.001)
cmds.setAttr(self.jGuideInside+".template", 1)
cmds.transformLimits(self.cvInsideLoc, tz=(0.01, 1), etz=(True, False))
inverseRadius = cmds.createNode("multiplyDivide", name=self.moduleGrp+"_Radius_Inv_MD")
cmds.setAttr(inverseRadius+".input2X", -1)
cmds.connectAttr(radiusCtrl+".translateX", inverseRadius+".input1X")
cmds.connectAttr(inverseRadius+".outputX", self.cvInsideLoc+".translateY")
self.ctrls.setLockHide([self.cvInsideLoc], ['tx', 'ty', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])
self.cvOutsideLoc = self.ctrls.cvLocator(ctrlName=self.guideName+"_OutsideLoc", r=0.2, d=1, guide=True)
cmds.parent(self.cvOutsideLoc, self.cvCenterLoc)
cmds.setAttr(self.cvOutsideLoc+".tz", -0.3)
self.jGuideOutside = cmds.joint(name=self.guideName+"_JGuideOutside", radius=0.001)
cmds.setAttr(self.jGuideOutside+".template", 1)
cmds.transformLimits(self.cvOutsideLoc, tz=(-1, 0.01), etz=(False, True))
cmds.connectAttr(inverseRadius+".outputX", self.cvOutsideLoc+".translateY")
self.ctrls.setLockHide([self.cvOutsideLoc], ['tx', 'ty', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])
cmds.parent(self.cvCenterLoc, self.moduleGrp)
cmds.parent(self.jGuideFront, self.jGuideInside, self.jGuideOutside, self.jGuideCenter)
cmds.parentConstraint(self.cvCenterLoc, self.jGuideCenter, maintainOffset=False, name=self.jGuideCenter+"_PaC")
cmds.parentConstraint(self.cvFrontLoc, self.jGuideFront, maintainOffset=False, name=self.jGuideFront+"_PaC")
cmds.parentConstraint(self.cvInsideLoc, self.jGuideInside, maintainOffset=False, name=self.cvInsideLoc+"_PaC")
cmds.parentConstraint(self.cvOutsideLoc, self.jGuideOutside, maintainOffset=False, name=self.cvOutsideLoc+"_PaC")
def changeStartFrame(self, *args):
""" Update moduleGrp startFrame attribute from UI.
"""
newStartFrameValue = cmds.intField(self.startFrameIF, query=True, value=True)
cmds.setAttr(self.moduleGrp+".startFrame", newStartFrameValue)
def changeSteering(self, *args):
""" Update moduleGrp steering attribute from UI.
"""
        newSteeringValue = cmds.checkBox(self.steeringCB, query=True, value=True)
        cmds.setAttr(self.moduleGrp+".steering", newSteeringValue)
def changeShowControls(self, *args):
""" Update moduleGrp showControls attribute from UI.
"""
newShowControlsValue = cmds.checkBox(self.showControlsCB, query=True, value=True)
cmds.setAttr(self.moduleGrp+".showControls", newShowControlsValue)
def changeGeo(self, *args):
""" Update moduleGrp geo attribute from UI textField.
"""
newGeoValue = cmds.textField(self.geoTF, query=True, text=True)
cmds.setAttr(self.moduleGrp+".geo", newGeoValue, type='string')
def rigModule(self, *args):
Base.StartClass.rigModule(self)
# verify if the guide exists:
if cmds.objExists(self.moduleGrp):
try:
hideJoints = cmds.checkBox('hideJointsCB', query=True, value=True)
except:
hideJoints = 1
# declare lists to store names and attributes:
self.mainCtrlList, self.wheelCtrlList, self.steeringGrpList, self.ctrlHookGrpList = [], [], [], []
# start as no having mirror:
sideList = [""]
            # analyze the mirror module:
self.mirrorAxis = cmds.getAttr(self.moduleGrp+".mirrorAxis")
if self.mirrorAxis != 'off':
                # get rig names:
self.mirrorNames = cmds.getAttr(self.moduleGrp+".mirrorName")
# get first and last letters to use as side initials (prefix):
sideList = [ self.mirrorNames[0]+'_', self.mirrorNames[len(self.mirrorNames)-1]+'_' ]
for s, side in enumerate(sideList):
duplicated = cmds.duplicate(self.moduleGrp, name=side+self.userGuideName+'_Guide_Base')[0]
allGuideList = cmds.listRelatives(duplicated, allDescendents=True)
for item in allGuideList:
cmds.rename(item, side+self.userGuideName+"_"+item)
self.mirrorGrp = cmds.group(name="Guide_Base_Grp", empty=True)
cmds.parent(side+self.userGuideName+'_Guide_Base', self.mirrorGrp, absolute=True)
# re-rename grp:
cmds.rename(self.mirrorGrp, side+self.userGuideName+'_'+self.mirrorGrp)
# do a group mirror with negative scaling:
if s == 1:
if cmds.getAttr(self.moduleGrp+".flip") == 0:
for axis in self.mirrorAxis:
gotValue = cmds.getAttr(side+self.userGuideName+"_Guide_Base.translate"+axis)
flipedValue = gotValue*(-2)
cmds.setAttr(side+self.userGuideName+'_'+self.mirrorGrp+'.translate'+axis, flipedValue)
else:
for axis in self.mirrorAxis:
cmds.setAttr(side+self.userGuideName+'_'+self.mirrorGrp+'.scale'+axis, -1)
# joint labelling:
jointLabelAdd = 1
else: # if not mirror:
duplicated = cmds.duplicate(self.moduleGrp, name=self.userGuideName+'_Guide_Base')[0]
allGuideList = cmds.listRelatives(duplicated, allDescendents=True)
for item in allGuideList:
cmds.rename(item, self.userGuideName+"_"+item)
self.mirrorGrp = cmds.group(self.userGuideName+'_Guide_Base', name="Guide_Base_Grp", relative=True)
#for Maya2012: self.userGuideName+'_'+self.moduleGrp+"_Grp"
# re-rename grp:
cmds.rename(self.mirrorGrp, self.userGuideName+'_'+self.mirrorGrp)
# joint labelling:
jointLabelAdd = 0
# store the number of this guide by module type
dpAR_count = utils.findModuleLastNumber(CLASS_NAME, "dpAR_type") + 1
# run for all sides
for s, side in enumerate(sideList):
# declare guides:
self.base = side+self.userGuideName+'_Guide_Base'
self.cvCenterLoc = side+self.userGuideName+"_Guide_CenterLoc"
self.cvFrontLoc = side+self.userGuideName+"_Guide_FrontLoc"
self.cvInsideLoc = side+self.userGuideName+"_Guide_InsideLoc"
self.cvOutsideLoc = side+self.userGuideName+"_Guide_OutsideLoc"
self.radiusGuide = side+self.userGuideName+"_Guide_Base_RadiusCtrl"
# create a joint:
cmds.select(clear=True)
# center joint:
self.centerJoint = cmds.joint(name=side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_Jnt", scaleCompensate=False)
cmds.addAttr(self.centerJoint, longName='dpAR_joint', attributeType='float', keyable=False)
# joint labelling:
utils.setJointLabel(self.centerJoint, s+jointLabelAdd, 18, self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel'])
# create end joint:
self.endJoint = cmds.joint(name=side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_JEnd", radius=0.5)
# main joint:
cmds.select(clear=True)
self.mainJoint = cmds.joint(name=side+self.userGuideName+"_"+self.langDic[self.langName]['c058_main']+"_Jnt", scaleCompensate=False)
cmds.addAttr(self.mainJoint, longName='dpAR_joint', attributeType='float', keyable=False)
# joint labelling:
utils.setJointLabel(self.mainJoint, s+jointLabelAdd, 18, self.userGuideName+"_"+self.langDic[self.langName]['c058_main'])
# create end joint:
self.mainEndJoint = cmds.joint(name=side+self.userGuideName+"_"+self.langDic[self.langName]['c058_main']+"_JEnd", radius=0.5)
# create controls:
self.wheelCtrl = self.ctrls.cvControl("id_060_WheelCenter", side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_Ctrl", r=self.ctrlRadius, d=self.curveDegree)
self.mainCtrl = self.ctrls.cvControl("id_061_WheelMain", side+self.userGuideName+"_"+self.langDic[self.langName]['c058_main']+"_Ctrl", r=self.ctrlRadius*0.4, d=self.curveDegree)
self.insideCtrl = self.ctrls.cvControl("id_062_WheelPivot", side+self.userGuideName+"_"+self.langDic[self.langName]['c011_RevFoot_B'].capitalize()+"_Ctrl", r=self.ctrlRadius*0.2, d=self.curveDegree, rot=(0, 90, 0))
self.outsideCtrl = self.ctrls.cvControl("id_062_WheelPivot", side+self.userGuideName+"_"+self.langDic[self.langName]['c010_RevFoot_A'].capitalize()+"_Ctrl", r=self.ctrlRadius*0.2, d=self.curveDegree, rot=(0, 90, 0))
self.mainCtrlList.append(self.mainCtrl)
self.wheelCtrlList.append(self.wheelCtrl)
# origined from attributes:
utils.originedFrom(objName=self.mainCtrl, attrString=self.base+";"+self.cvCenterLoc+";"+self.cvFrontLoc+";"+self.radiusGuide)
utils.originedFrom(objName=self.insideCtrl, attrString=self.cvInsideLoc)
utils.originedFrom(objName=self.outsideCtrl, attrString=self.cvOutsideLoc)
# prepare group to receive steering wheel connection:
self.toSteeringGrp = cmds.group(self.insideCtrl, name=side+self.userGuideName+"_"+self.langDic[self.langName]['c070_steering'].capitalize()+"_Grp")
cmds.addAttr(self.toSteeringGrp, longName=self.langDic[self.langName]['c070_steering'], attributeType='bool', keyable=True)
cmds.addAttr(self.toSteeringGrp, longName=self.langDic[self.langName]['c070_steering']+self.langDic[self.langName]['m151_invert'], attributeType='bool', keyable=True)
cmds.setAttr(self.toSteeringGrp+"."+self.langDic[self.langName]['c070_steering'], 1)
self.steeringGrpList.append(self.toSteeringGrp)
# position and orientation of joint and control:
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, self.centerJoint, maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvFrontLoc, self.endJoint, maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, self.wheelCtrl, maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, self.mainCtrl, maintainOffset=False))
cmds.parentConstraint(self.mainCtrl, self.mainJoint, maintainOffset=False, name=self.mainJoint+"_PaC")
cmds.delete(cmds.parentConstraint(self.cvFrontLoc, self.mainEndJoint, maintainOffset=False))
if s == 1 and cmds.getAttr(self.moduleGrp+".flip") == 1:
cmds.move(self.ctrlRadius, self.mainCtrl, moveY=True, relative=True, objectSpace=True, worldSpaceDistance=True)
else:
cmds.move(-self.ctrlRadius, self.mainCtrl, moveY=True, relative=True, objectSpace=True, worldSpaceDistance=True)
cmds.delete(cmds.parentConstraint(self.cvInsideLoc, self.toSteeringGrp, maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvOutsideLoc, self.outsideCtrl, maintainOffset=False))
# zeroOut controls:
zeroGrpList = utils.zeroOut([self.mainCtrl, self.wheelCtrl, self.toSteeringGrp, self.outsideCtrl])
wheelAutoGrp = utils.zeroOut([self.wheelCtrl])
wheelAutoGrp = cmds.rename(wheelAutoGrp, side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_Auto_Grp")
# fixing flip mirror:
if s == 1:
if cmds.getAttr(self.moduleGrp+".flip") == 1:
for zeroOutGrp in zeroGrpList:
cmds.setAttr(zeroOutGrp+".scaleX", -1)
cmds.setAttr(zeroOutGrp+".scaleY", -1)
cmds.setAttr(zeroOutGrp+".scaleZ", -1)
cmds.addAttr(self.wheelCtrl, longName='scaleCompensate', attributeType="bool", keyable=False)
cmds.setAttr(self.wheelCtrl+".scaleCompensate", 1, channelBox=True)
cmds.connectAttr(self.wheelCtrl+".scaleCompensate", self.centerJoint+".segmentScaleCompensate", force=True)
cmds.addAttr(self.mainCtrl, longName='scaleCompensate', attributeType="bool", keyable=False)
cmds.setAttr(self.mainCtrl+".scaleCompensate", 1, channelBox=True)
cmds.connectAttr(self.mainCtrl+".scaleCompensate", self.mainJoint+".segmentScaleCompensate", force=True)
# hide visibility attributes:
self.ctrls.setLockHide([self.mainCtrl, self.insideCtrl, self.outsideCtrl], ['v'])
self.ctrls.setLockHide([self.wheelCtrl], ['tx', 'ty', 'tz', 'rx', 'ry', 'sx', 'sy', 'sz', 'v'])
# grouping:
cmds.parentConstraint(self.wheelCtrl, self.centerJoint, maintainOffset=False, name=self.centerJoint+"_PaC")
cmds.scaleConstraint(self.wheelCtrl, self.centerJoint, maintainOffset=True, name=self.centerJoint+"_ScC")
cmds.parent(zeroGrpList[1], self.mainCtrl, absolute=True)
cmds.parent(zeroGrpList[0], self.outsideCtrl, absolute=True)
cmds.parent(zeroGrpList[3], self.insideCtrl, absolute=True)
# add attributes:
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c047_autoRotate'], attributeType="bool", defaultValue=1, keyable=True)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c068_startFrame'], attributeType="long", defaultValue=1, keyable=False)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c067_radius'], attributeType="float", min=0.01, defaultValue=self.ctrlRadius, keyable=True)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c069_radiusScale'], attributeType="float", defaultValue=1, keyable=False)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c021_showControls'], attributeType="long", min=0, max=1, defaultValue=0, keyable=True)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c070_steering'], attributeType="bool", defaultValue=0, keyable=True)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['i037_to']+self.langDic[self.langName]['c070_steering'].capitalize(), attributeType="float", defaultValue=0, keyable=False)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c070_steering']+self.langDic[self.langName]['c053_invert'].capitalize(), attributeType="long", min=0, max=1, defaultValue=1, keyable=False)
cmds.addAttr(self.wheelCtrl, longName=self.langDic[self.langName]['c093_tryKeepUndo'], attributeType="long", min=0, max=1, defaultValue=1, keyable=False)
# get stored values by user:
startFrameValue = cmds.getAttr(self.moduleGrp+".startFrame")
steeringValue = cmds.getAttr(self.moduleGrp+".steering")
showControlsValue = cmds.getAttr(self.moduleGrp+".showControls")
cmds.setAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c068_startFrame'], startFrameValue, channelBox=True)
cmds.setAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c070_steering'], steeringValue, channelBox=True)
cmds.setAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c021_showControls'], showControlsValue, channelBox=True)
cmds.setAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c070_steering']+self.langDic[self.langName]['c053_invert'].capitalize(), 1, channelBox=True)
cmds.setAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c093_tryKeepUndo'], 1, channelBox=True)
if s == 1:
if cmds.getAttr(self.moduleGrp+".flip") == 1:
cmds.setAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c070_steering']+self.langDic[self.langName]['c053_invert'].capitalize(), 0)
# automatic rotation wheel setup:
receptSteeringMD = cmds.createNode('multiplyDivide', name=side+self.userGuideName+"_"+self.langDic[self.langName]['c070_steering']+"_MD")
inverseSteeringMD = cmds.createNode('multiplyDivide', name=side+self.userGuideName+"_"+self.langDic[self.langName]['c070_steering']+"_Inv_MD")
steeringInvCnd = cmds.createNode('condition', name=side+self.userGuideName+"_"+self.langDic[self.langName]['c070_steering']+"_Inv_Cnd")
cmds.setAttr(steeringInvCnd+".colorIfTrueR", 1)
cmds.setAttr(steeringInvCnd+".colorIfFalseR", -1)
cmds.connectAttr(self.wheelCtrl+"."+self.langDic[self.langName]['i037_to']+self.langDic[self.langName]['c070_steering'].capitalize(), receptSteeringMD+".input1X", force=True)
cmds.connectAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c070_steering'], receptSteeringMD+".input2X", force=True)
cmds.connectAttr(receptSteeringMD+".outputX", inverseSteeringMD+".input1X", force=True)
cmds.connectAttr(steeringInvCnd+".outColorR", inverseSteeringMD+".input2X", force=True)
cmds.connectAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c070_steering']+self.langDic[self.langName]['c053_invert'].capitalize(), steeringInvCnd+".firstTerm", force=True)
cmds.connectAttr(inverseSteeringMD+".outputX", self.toSteeringGrp+".rotateY", force=True)
# create locators (frontLoc to get direction and oldLoc to store wheel old position):
self.frontLoc = cmds.spaceLocator(name=side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_Front_Loc")[0]
self.oldLoc = cmds.spaceLocator(name=side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_Old_Loc")[0]
cmds.delete(cmds.parentConstraint(self.cvFrontLoc, self.frontLoc, maintainOffset=False))
cmds.parent(self.frontLoc, self.mainCtrl)
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, self.oldLoc, maintainOffset=False))
cmds.setAttr(self.frontLoc+".visibility", 0, lock=True)
cmds.setAttr(self.oldLoc+".visibility", 0, lock=True)
# this wheel auto group locator could be replaced by a decomposeMatrix to get the translation in world space of the Wheel_Auto_Ctrl_Grp instead:
self.wheelAutoGrpLoc = cmds.spaceLocator(name=side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_Auto_Loc")[0]
cmds.pointConstraint(wheelAutoGrp, self.wheelAutoGrpLoc, maintainOffset=False, name=self.wheelAutoGrpLoc+"_PoC")
cmds.setAttr(self.wheelAutoGrpLoc+".visibility", 0, lock=True)
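                # A hedged, comment-only sketch of the decomposeMatrix alternative mentioned above;
                # it is not used by this module, and the node/attribute names below are assumptions
                # (the decomposeMatrix node also requires Maya's matrixNodes plugin to be available):
                #   autoDM = cmds.createNode("decomposeMatrix", name=side+self.userGuideName+"_Wheel_Auto_DM")
                #   cmds.connectAttr(wheelAutoGrp+".worldMatrix[0]", autoDM+".inputMatrix", force=True)
                #   # the expression below could then read autoDM.outputTranslateX/Y/Z directly,
                #   # removing the need for the hidden locator and its pointConstraint.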
expString = "if ("+self.wheelCtrl+"."+self.langDic[self.langName]['c047_autoRotate']+" == 1) {"+\
"\nif ("+self.wheelCtrl+"."+self.langDic[self.langName]['c093_tryKeepUndo']+" == 1) { undoInfo -stateWithoutFlush 0; };"+\
"\nfloat $radius = "+self.wheelCtrl+"."+self.langDic[self.langName]['c067_radius']+" * "+self.wheelCtrl+"."+self.langDic[self.langName]['c069_radiusScale']+\
";\nvector $moveVectorOld = `xform -q -ws -t \""+self.oldLoc+\
"\"`;\nvector $moveVector = << "+self.wheelAutoGrpLoc+".translateX, "+self.wheelAutoGrpLoc+".translateY, "+self.wheelAutoGrpLoc+".translateZ >>;"+\
"\nvector $dirVector = `xform -q -ws -t \""+self.frontLoc+\
"\"`;\nvector $wheelVector = ($dirVector - $moveVector);"+\
"\nvector $motionVector = ($moveVector - $moveVectorOld);"+\
"\nfloat $distance = mag($motionVector);"+\
"\n$dot = dotProduct($motionVector, $wheelVector, 1);\n"+\
wheelAutoGrp+".rotateZ = "+wheelAutoGrp+".rotateZ - 360 / (6.283*$radius) * ($dot*$distance);"+\
"\nxform -t ($moveVector.x) ($moveVector.y) ($moveVector.z) "+self.oldLoc+\
";\nif (frame == "+self.wheelCtrl+"."+self.langDic[self.langName]['c068_startFrame']+") { "+wheelAutoGrp+".rotateZ = 0; };"+\
"\nif ("+self.wheelCtrl+"."+self.langDic[self.langName]['c093_tryKeepUndo']+" == 1) { undoInfo -stateWithoutFlush 1; };};"
# expression:
cmds.expression(name=side+self.userGuideName+"_"+self.langDic[self.langName]['m156_wheel']+"_Exp", object=self.frontLoc, string=expString)
self.ctrls.setLockHide([self.frontLoc, self.wheelAutoGrpLoc], ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v'])
# deformers:
self.loadedGeo = cmds.getAttr(self.moduleGrp+".geo")
# geometry holder:
self.geoHolder = cmds.polyCube(name=side+self.userGuideName+"_"+self.langDic[self.langName]['c046_holder']+"_Geo", constructionHistory=False)[0]
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, self.geoHolder, maintainOffset=False))
cmds.setAttr(self.geoHolder+".visibility", 0, lock=True)
# skinning:
cmds.skinCluster(self.centerJoint, self.geoHolder, toSelectedBones=True, dropoffRate=4.0, maximumInfluences=3, skinMethod=0, normalizeWeights=1, removeUnusedInfluence=False, name=side+self.userGuideName+"_"+self.langDic[self.langName]['c046_holder']+"_SC")
if self.loadedGeo:
if cmds.objExists(self.loadedGeo):
baseName = utils.extractSuffix(self.loadedGeo)
skinClusterName = baseName+"_SC"
if "|" in skinClusterName:
skinClusterName = skinClusterName[skinClusterName.rfind("|")+1:]
try:
cmds.skinCluster(self.centerJoint, self.loadedGeo, toSelectedBones=True, dropoffRate=4.0, maximumInfluences=3, skinMethod=0, normalizeWeights=1, removeUnusedInfluence=False, name=skinClusterName)
except:
childList = cmds.listRelatives(self.loadedGeo, children=True, allDescendents=True)
if childList:
for item in childList:
itemType = cmds.objectType(item)
if itemType == "mesh" or itemType == "nurbsSurface":
try:
skinClusterName = utils.extractSuffix(item)+"_SC"
cmds.skinCluster(self.centerJoint, item, toSelectedBones=True, dropoffRate=4.0, maximumInfluences=3, skinMethod=0, normalizeWeights=1, removeUnusedInfluence=False, name=skinClusterName)
except:
pass
# lattice:
latticeList = cmds.lattice(self.geoHolder, divisions=(6, 6, 6), outsideLattice=2, outsideFalloffDistance=1, position=(0, 0, 0), scale=(self.ctrlRadius*2, self.ctrlRadius*2, self.ctrlRadius*2), name=side+self.userGuideName+"_FFD") #[deformer, lattice, base]
cmds.scale(self.ctrlRadius*2, self.ctrlRadius*2, self.ctrlRadius*2, latticeList[2])
# clusters:
upperClusterList = cmds.cluster(latticeList[1]+".pt[0:5][4:5][0:5]", relative=True, name=side+self.userGuideName+"_"+self.langDic[self.langName]['c044_upper']+"_Cls") #[deform, handle]
middleClusterList = cmds.cluster(latticeList[1]+".pt[0:5][2:3][0:5]", relative=True, name=side+self.userGuideName+"_"+self.langDic[self.langName]['m033_middle']+"_Cls") #[deform, handle]
lowerClusterList = cmds.cluster(latticeList[1]+".pt[0:5][0:1][0:5]", relative=True, name=side+self.userGuideName+"_"+self.langDic[self.langName]['c045_lower']+"_Cls") #[deform, handle]
clusterGrpList = utils.zeroOut([upperClusterList[1], middleClusterList[1], lowerClusterList[1]])
clustersGrp = cmds.group(clusterGrpList, name=side+self.userGuideName+"_Clusters_Grp")
# deform controls:
upperDefCtrl = self.ctrls.cvControl("id_063_WheelDeform", side+self.userGuideName+"_"+self.langDic[self.langName]['c044_upper']+"_Ctrl", r=self.ctrlRadius*0.5, d=self.curveDegree)
middleDefCtrl = self.ctrls.cvControl("id_064_WheelMiddle", side+self.userGuideName+"_"+self.langDic[self.langName]['m033_middle']+"_Ctrl", r=self.ctrlRadius*0.5, d=self.curveDegree)
lowerDefCtrl = self.ctrls.cvControl("id_063_WheelDeform", side+self.userGuideName+"_"+self.langDic[self.langName]['c045_lower']+"_Ctrl", r=self.ctrlRadius*0.5, d=self.curveDegree, rot=(0, 0, 180))
defCtrlGrpList = utils.zeroOut([upperDefCtrl, middleDefCtrl, lowerDefCtrl])
defCtrlGrp = cmds.group(defCtrlGrpList, name=side+self.userGuideName+"_Ctrl_Grp")
# positions:
cmds.delete(cmds.parentConstraint(upperClusterList[1], defCtrlGrpList[0], maintainOffset=False))
cmds.delete(cmds.parentConstraint(middleClusterList[1], defCtrlGrpList[1], maintainOffset=False))
cmds.delete(cmds.parentConstraint(lowerClusterList[1], defCtrlGrpList[2], maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, latticeList[1], maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, latticeList[2], maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, clustersGrp, maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.cvCenterLoc, defCtrlGrp, maintainOffset=False))
outsideDist = cmds.getAttr(self.cvOutsideLoc+".tz")
if s == 1:
if cmds.getAttr(self.moduleGrp+".flip") == 1:
outsideDist = -outsideDist
cmds.move(outsideDist, defCtrlGrp, moveZ=True, relative=True, objectSpace=True, worldSpaceDistance=True)
self.ctrls.directConnect(upperDefCtrl, upperClusterList[1])
self.ctrls.directConnect(middleDefCtrl, middleClusterList[1])
self.ctrls.directConnect(lowerDefCtrl, lowerClusterList[1])
# grouping deformers:
if self.loadedGeo:
if cmds.objExists(self.loadedGeo):
cmds.lattice(latticeList[0], edit=True, geometry=self.loadedGeo)
defGrp = cmds.group(latticeList[1], latticeList[2], clustersGrp, name=side+self.userGuideName+"_Deform_Grp")
cmds.parentConstraint(self.mainCtrl, defGrp, maintainOffset=True, name=defGrp+"_PaC")
cmds.scaleConstraint(self.mainCtrl, defGrp, maintainOffset=True, name=defGrp+"_ScC")
cmds.parent(defCtrlGrp, self.mainCtrl)
cmds.connectAttr(self.wheelCtrl+"."+self.langDic[self.langName]['c021_showControls'], defCtrlGrp+".visibility", force=True)
# create a masterModuleGrp to be checked if this rig exists:
self.toCtrlHookGrp = cmds.group(zeroGrpList[2], name=side+self.userGuideName+"_Control_Grp")
self.toScalableHookGrp = cmds.group(self.centerJoint, self.mainJoint, defGrp, name=side+self.userGuideName+"_Joint_Grp")
self.toStaticHookGrp = cmds.group(self.toCtrlHookGrp, self.toScalableHookGrp, self.oldLoc, self.wheelAutoGrpLoc, self.geoHolder, name=side+self.userGuideName+"_Grp")
# add hook attributes to be read when rigging integrated modules:
utils.addHook(objName=self.toCtrlHookGrp, hookType='ctrlHook')
utils.addHook(objName=self.toScalableHookGrp, hookType='scalableHook')
utils.addHook(objName=self.toStaticHookGrp, hookType='staticHook')
cmds.addAttr(self.toStaticHookGrp, longName="dpAR_name", dataType="string")
cmds.addAttr(self.toStaticHookGrp, longName="dpAR_type", dataType="string")
cmds.setAttr(self.toStaticHookGrp+".dpAR_name", self.userGuideName, type="string")
cmds.setAttr(self.toStaticHookGrp+".dpAR_type", CLASS_NAME, type="string")
# add module type counter value
cmds.addAttr(self.toStaticHookGrp, longName='dpAR_count', attributeType='long', keyable=False)
cmds.setAttr(self.toStaticHookGrp+'.dpAR_count', dpAR_count)
self.ctrlHookGrpList.append(self.toCtrlHookGrp)
if hideJoints:
cmds.setAttr(self.toScalableHookGrp+".visibility", 0)
# delete duplicated group for side (mirror):
cmds.delete(side+self.userGuideName+'_'+self.mirrorGrp)
# finalize this rig:
self.integratingInfo()
cmds.select(clear=True)
# delete UI (moduleLayout), GUIDE and moduleInstance namespace:
self.deleteModule()
    def integratingInfo(self, *args):
        """ This method will create a dictionary with information about the integration system between modules.
        """
        Base.StartClass.integratingInfo(self)
self.integratedActionsDic = {
"module": {
"mainCtrlList" : self.mainCtrlList,
"wheelCtrlList" : self.wheelCtrlList,
"steeringGrpList" : self.steeringGrpList,
"ctrlHookGrpList" : self.ctrlHookGrpList,
}
}
###
#
# Wheel Auto Rotation Expression:
#
# if (WHEEL_CTRL.AUTO_ROTATE == 1) {
# if (WHEEL_CTRL.TRYKEEPUNDO == 1) { undoInfo -stateWithoutFlush 0; };
# float $radius = WHEEL_CTRL.RADIUS * WHEEL_CTRL.RADIUSSCALE;
# vector $moveVectorOld = `xform -q -ws -t "OLD_LOC"`;
# vector $moveVector = << AUTO_GRP_LOC.translateX, AUTO_GRP_LOC.translateY, AUTO_GRP_LOC.translateZ >>;
# vector $dirVector = `xform -q -ws -t "FRONT_LOC"`;
# vector $wheelVector = ($dirVector - $moveVector);
# vector $motionVector = ($moveVector - $moveVectorOld);
# float $distance = mag($motionVector);
# $dot = dotProduct($motionVector, $wheelVector, 1);
# AUTO_GRP.rotateZ = AUTO_GRP.rotateZ - 360 / (6.283*$radius) * ($dot*$distance);
# xform -t ($moveVector.x) ($moveVector.y) ($moveVector.z) OLD_LOC;
# if (frame == WHEEL_CTRL.START_FRAME) { AUTO_GRP.rotateZ = 0; };
# if (WHEEL_CTRL.TRYKEEPUNDO == 1) { undoInfo -stateWithoutFlush 1; };};
#
###
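###
#
# A minimal, hedged Python sketch of the same auto-rotation math implemented by the MEL
# expression above, kept as a plain-Python reference only. The function below is an
# illustrative assumption: it is not called anywhere by dpAutoRigSystem.
#
###
def _autoRotateDeltaZ(oldPos, newPos, frontPos, radius):
    """ Return the rotateZ increment (in degrees) for a wheel of the given radius whose pivot
        moved from oldPos to newPos in world space, using frontPos (the aim locator position)
        to resolve whether the wheel rolls forward or backward. Positions are (x, y, z) tuples.
    """
    import math
    motion = [n - o for n, o in zip(newPos, oldPos)]
    wheelDir = [f - n for f, n in zip(frontPos, newPos)]
    distance = math.sqrt(sum(c * c for c in motion))
    dirLength = math.sqrt(sum(c * c for c in wheelDir))
    if distance == 0.0 or dirLength == 0.0 or radius == 0.0:
        return 0.0
    # normalized dot product: +1 when rolling toward the front locator, -1 when rolling away from it
    dot = sum(m * w for m, w in zip(motion, wheelDir)) / (distance * dirLength)
    # one full turn (360 degrees) for each circumference (2*pi*radius) of travel
    return -360.0 / (2.0 * math.pi * radius) * (dot * distance)
# Example (assumed values): a wheel of radius 1 moving 2 units straight toward its front locator
# rotates by about -114.59 degrees: _autoRotateDeltaZ((0, 0, 0), (2, 0, 0), (3, 0, 0), 1.0)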
|
gpl-2.0
| 6,195,987,666,483,919,000
| 73.649682
| 272
| 0.622401
| false
| 3.578888
| false
| false
| false
|
Nikola-K/tp_smapi_pyqt
|
design.py
|
1
|
28157
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'design.ui'
#
# Created: Sat Feb 7 18:23:12 2015
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(421, 565)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setContentsMargins(3, 5, 3, -1)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.batteryComboBox = QtGui.QComboBox(self.centralwidget)
self.batteryComboBox.setObjectName(_fromUtf8("batteryComboBox"))
self.batteryComboBox.addItem(_fromUtf8(""))
self.batteryComboBox.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.batteryComboBox)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.horizontalLayout.addWidget(self.line)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.installed = QtGui.QLabel(self.centralwidget)
self.installed.setFrameShape(QtGui.QFrame.Box)
self.installed.setFrameShadow(QtGui.QFrame.Plain)
self.installed.setAlignment(QtCore.Qt.AlignCenter)
self.installed.setObjectName(_fromUtf8("installed"))
self.horizontalLayout.addWidget(self.installed)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_25 = QtGui.QLabel(self.centralwidget)
self.label_25.setObjectName(_fromUtf8("label_25"))
self.horizontalLayout_2.addWidget(self.label_25)
self.ac_connected = QtGui.QLabel(self.centralwidget)
self.ac_connected.setFrameShape(QtGui.QFrame.Box)
self.ac_connected.setAlignment(QtCore.Qt.AlignCenter)
self.ac_connected.setObjectName(_fromUtf8("ac_connected"))
self.horizontalLayout_2.addWidget(self.ac_connected)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.formLayout = QtGui.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_3)
self.state = QtGui.QLabel(self.centralwidget)
self.state.setFrameShape(QtGui.QFrame.Box)
self.state.setAlignment(QtCore.Qt.AlignCenter)
self.state.setObjectName(_fromUtf8("state"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.state)
self.label_4 = QtGui.QLabel(self.centralwidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_4)
self.cycle_count = QtGui.QLabel(self.centralwidget)
self.cycle_count.setFrameShape(QtGui.QFrame.Box)
self.cycle_count.setAlignment(QtCore.Qt.AlignCenter)
self.cycle_count.setObjectName(_fromUtf8("cycle_count"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.cycle_count)
self.label_5 = QtGui.QLabel(self.centralwidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_5)
self.current_now = QtGui.QLabel(self.centralwidget)
self.current_now.setFrameShape(QtGui.QFrame.Box)
self.current_now.setAlignment(QtCore.Qt.AlignCenter)
self.current_now.setObjectName(_fromUtf8("current_now"))
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.current_now)
self.label_6 = QtGui.QLabel(self.centralwidget)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_6)
self.current_avg = QtGui.QLabel(self.centralwidget)
self.current_avg.setFrameShape(QtGui.QFrame.Box)
self.current_avg.setAlignment(QtCore.Qt.AlignCenter)
self.current_avg.setObjectName(_fromUtf8("current_avg"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.current_avg)
self.label_7 = QtGui.QLabel(self.centralwidget)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_7)
self.power_now = QtGui.QLabel(self.centralwidget)
self.power_now.setFrameShape(QtGui.QFrame.Box)
self.power_now.setAlignment(QtCore.Qt.AlignCenter)
self.power_now.setObjectName(_fromUtf8("power_now"))
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.power_now)
self.label_8 = QtGui.QLabel(self.centralwidget)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_8)
self.power_avg = QtGui.QLabel(self.centralwidget)
self.power_avg.setFrameShape(QtGui.QFrame.Box)
self.power_avg.setAlignment(QtCore.Qt.AlignCenter)
self.power_avg.setObjectName(_fromUtf8("power_avg"))
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.power_avg)
self.label_9 = QtGui.QLabel(self.centralwidget)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_9)
self.last_full_capacity = QtGui.QLabel(self.centralwidget)
self.last_full_capacity.setFrameShape(QtGui.QFrame.Box)
self.last_full_capacity.setAlignment(QtCore.Qt.AlignCenter)
self.last_full_capacity.setObjectName(_fromUtf8("last_full_capacity"))
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.last_full_capacity)
self.label_10 = QtGui.QLabel(self.centralwidget)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_10)
self.remaining_percent = QtGui.QLabel(self.centralwidget)
self.remaining_percent.setFrameShape(QtGui.QFrame.Box)
self.remaining_percent.setAlignment(QtCore.Qt.AlignCenter)
self.remaining_percent.setObjectName(_fromUtf8("remaining_percent"))
self.formLayout.setWidget(7, QtGui.QFormLayout.FieldRole, self.remaining_percent)
self.label_11 = QtGui.QLabel(self.centralwidget)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.formLayout.setWidget(8, QtGui.QFormLayout.LabelRole, self.label_11)
self.remaining_running_time = QtGui.QLabel(self.centralwidget)
self.remaining_running_time.setFrameShape(QtGui.QFrame.Box)
self.remaining_running_time.setAlignment(QtCore.Qt.AlignCenter)
self.remaining_running_time.setObjectName(_fromUtf8("remaining_running_time"))
self.formLayout.setWidget(8, QtGui.QFormLayout.FieldRole, self.remaining_running_time)
self.label_12 = QtGui.QLabel(self.centralwidget)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.formLayout.setWidget(9, QtGui.QFormLayout.LabelRole, self.label_12)
self.remaining_charge_time = QtGui.QLabel(self.centralwidget)
self.remaining_charge_time.setFrameShape(QtGui.QFrame.Box)
self.remaining_charge_time.setAlignment(QtCore.Qt.AlignCenter)
self.remaining_charge_time.setObjectName(_fromUtf8("remaining_charge_time"))
self.formLayout.setWidget(9, QtGui.QFormLayout.FieldRole, self.remaining_charge_time)
self.label_13 = QtGui.QLabel(self.centralwidget)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.formLayout.setWidget(10, QtGui.QFormLayout.LabelRole, self.label_13)
self.remaining_capacity = QtGui.QLabel(self.centralwidget)
self.remaining_capacity.setFrameShape(QtGui.QFrame.Box)
self.remaining_capacity.setAlignment(QtCore.Qt.AlignCenter)
self.remaining_capacity.setObjectName(_fromUtf8("remaining_capacity"))
self.formLayout.setWidget(10, QtGui.QFormLayout.FieldRole, self.remaining_capacity)
self.horizontalLayout_4.addLayout(self.formLayout)
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.label_24 = QtGui.QLabel(self.centralwidget)
self.label_24.setObjectName(_fromUtf8("label_24"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_24)
self.label_14 = QtGui.QLabel(self.centralwidget)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.formLayout_2.setWidget(10, QtGui.QFormLayout.LabelRole, self.label_14)
self.label_22 = QtGui.QLabel(self.centralwidget)
self.label_22.setObjectName(_fromUtf8("label_22"))
self.formLayout_2.setWidget(9, QtGui.QFormLayout.LabelRole, self.label_22)
self.label_20 = QtGui.QLabel(self.centralwidget)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.formLayout_2.setWidget(8, QtGui.QFormLayout.LabelRole, self.label_20)
self.label_17 = QtGui.QLabel(self.centralwidget)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_17)
self.label_16 = QtGui.QLabel(self.centralwidget)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_16)
self.label_18 = QtGui.QLabel(self.centralwidget)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.formLayout_2.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_18)
self.label_23 = QtGui.QLabel(self.centralwidget)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_23)
self.label_21 = QtGui.QLabel(self.centralwidget)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_21)
self.label_19 = QtGui.QLabel(self.centralwidget)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_19)
self.label_15 = QtGui.QLabel(self.centralwidget)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_15)
self.design_capacity = QtGui.QLabel(self.centralwidget)
self.design_capacity.setFrameShape(QtGui.QFrame.Box)
self.design_capacity.setAlignment(QtCore.Qt.AlignCenter)
self.design_capacity.setObjectName(_fromUtf8("design_capacity"))
self.formLayout_2.setWidget(10, QtGui.QFormLayout.FieldRole, self.design_capacity)
self.manufacturing_date = QtGui.QLabel(self.centralwidget)
self.manufacturing_date.setFrameShape(QtGui.QFrame.Box)
self.manufacturing_date.setAlignment(QtCore.Qt.AlignCenter)
self.manufacturing_date.setObjectName(_fromUtf8("manufacturing_date"))
self.formLayout_2.setWidget(9, QtGui.QFormLayout.FieldRole, self.manufacturing_date)
self.chemistry = QtGui.QLabel(self.centralwidget)
self.chemistry.setFrameShape(QtGui.QFrame.Box)
self.chemistry.setAlignment(QtCore.Qt.AlignCenter)
self.chemistry.setObjectName(_fromUtf8("chemistry"))
self.formLayout_2.setWidget(8, QtGui.QFormLayout.FieldRole, self.chemistry)
self.manufacturer = QtGui.QLabel(self.centralwidget)
self.manufacturer.setFrameShape(QtGui.QFrame.Box)
self.manufacturer.setAlignment(QtCore.Qt.AlignCenter)
self.manufacturer.setObjectName(_fromUtf8("manufacturer"))
self.formLayout_2.setWidget(7, QtGui.QFormLayout.FieldRole, self.manufacturer)
self.design_voltage = QtGui.QLabel(self.centralwidget)
self.design_voltage.setFrameShape(QtGui.QFrame.Box)
self.design_voltage.setAlignment(QtCore.Qt.AlignCenter)
self.design_voltage.setObjectName(_fromUtf8("design_voltage"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.design_voltage)
self.model = QtGui.QLabel(self.centralwidget)
self.model.setFrameShape(QtGui.QFrame.Box)
self.model.setAlignment(QtCore.Qt.AlignCenter)
self.model.setObjectName(_fromUtf8("model"))
self.formLayout_2.setWidget(5, QtGui.QFormLayout.FieldRole, self.model)
self.first_use_date = QtGui.QLabel(self.centralwidget)
self.first_use_date.setFrameShape(QtGui.QFrame.Box)
self.first_use_date.setAlignment(QtCore.Qt.AlignCenter)
self.first_use_date.setObjectName(_fromUtf8("first_use_date"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.first_use_date)
self.serial = QtGui.QLabel(self.centralwidget)
self.serial.setFrameShape(QtGui.QFrame.Box)
self.serial.setAlignment(QtCore.Qt.AlignCenter)
self.serial.setObjectName(_fromUtf8("serial"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.serial)
self.barcoding = QtGui.QLabel(self.centralwidget)
self.barcoding.setFrameShape(QtGui.QFrame.Box)
self.barcoding.setAlignment(QtCore.Qt.AlignCenter)
self.barcoding.setObjectName(_fromUtf8("barcoding"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.barcoding)
self.voltage = QtGui.QLabel(self.centralwidget)
self.voltage.setFrameShape(QtGui.QFrame.Box)
self.voltage.setAlignment(QtCore.Qt.AlignCenter)
self.voltage.setObjectName(_fromUtf8("voltage"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.voltage)
self.temperature = QtGui.QLabel(self.centralwidget)
self.temperature.setFrameShape(QtGui.QFrame.Box)
self.temperature.setAlignment(QtCore.Qt.AlignCenter)
self.temperature.setObjectName(_fromUtf8("temperature"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.temperature)
self.horizontalLayout_4.addLayout(self.formLayout_2)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.groupBox = QtGui.QGroupBox(self.centralwidget)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setContentsMargins(0, 4, 0, 0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.label_49 = QtGui.QLabel(self.groupBox)
self.label_49.setObjectName(_fromUtf8("label_49"))
self.horizontalLayout_6.addWidget(self.label_49)
self.start_charge_slider = QtGui.QSlider(self.groupBox)
self.start_charge_slider.setSliderPosition(1)
self.start_charge_slider.setOrientation(QtCore.Qt.Horizontal)
self.start_charge_slider.setTickPosition(QtGui.QSlider.TicksBelow)
self.start_charge_slider.setTickInterval(25)
self.start_charge_slider.setObjectName(_fromUtf8("start_charge_slider"))
self.horizontalLayout_6.addWidget(self.start_charge_slider)
self.start_charge_spinbox = QtGui.QSpinBox(self.groupBox)
self.start_charge_spinbox.setMinimum(1)
self.start_charge_spinbox.setObjectName(_fromUtf8("start_charge_spinbox"))
self.horizontalLayout_6.addWidget(self.start_charge_spinbox)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_50 = QtGui.QLabel(self.groupBox)
self.label_50.setObjectName(_fromUtf8("label_50"))
self.horizontalLayout_5.addWidget(self.label_50)
self.stop_charge_slider = QtGui.QSlider(self.groupBox)
self.stop_charge_slider.setMinimum(1)
self.stop_charge_slider.setMaximum(100)
self.stop_charge_slider.setOrientation(QtCore.Qt.Horizontal)
self.stop_charge_slider.setTickPosition(QtGui.QSlider.TicksBelow)
self.stop_charge_slider.setTickInterval(25)
self.stop_charge_slider.setObjectName(_fromUtf8("stop_charge_slider"))
self.horizontalLayout_5.addWidget(self.stop_charge_slider)
self.stop_charge_spinbox = QtGui.QSpinBox(self.groupBox)
self.stop_charge_spinbox.setMinimum(1)
self.stop_charge_spinbox.setMaximum(100)
self.stop_charge_spinbox.setObjectName(_fromUtf8("stop_charge_spinbox"))
self.horizontalLayout_5.addWidget(self.stop_charge_spinbox)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.label_26 = QtGui.QLabel(self.groupBox)
font = QtGui.QFont()
font.setPointSize(10)
self.label_26.setFont(font)
self.label_26.setWordWrap(True)
self.label_26.setObjectName(_fromUtf8("label_26"))
self.verticalLayout_3.addWidget(self.label_26)
self.line_3 = QtGui.QFrame(self.groupBox)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.verticalLayout_3.addWidget(self.line_3)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.label_27 = QtGui.QLabel(self.groupBox)
self.label_27.setObjectName(_fromUtf8("label_27"))
self.horizontalLayout_8.addWidget(self.label_27)
self.inhibit_charge_slider = QtGui.QSlider(self.groupBox)
self.inhibit_charge_slider.setMaximum(120)
self.inhibit_charge_slider.setOrientation(QtCore.Qt.Horizontal)
self.inhibit_charge_slider.setObjectName(_fromUtf8("inhibit_charge_slider"))
self.horizontalLayout_8.addWidget(self.inhibit_charge_slider)
self.inhibit_charge_spinbox = QtGui.QSpinBox(self.groupBox)
self.inhibit_charge_spinbox.setMaximum(120)
self.inhibit_charge_spinbox.setObjectName(_fromUtf8("inhibit_charge_spinbox"))
self.horizontalLayout_8.addWidget(self.inhibit_charge_spinbox)
self.verticalLayout_3.addLayout(self.horizontalLayout_8)
self.label_28 = QtGui.QLabel(self.groupBox)
self.label_28.setWordWrap(True)
self.label_28.setObjectName(_fromUtf8("label_28"))
self.verticalLayout_3.addWidget(self.label_28)
self.verticalLayout.addWidget(self.groupBox)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.line_2 = QtGui.QFrame(self.centralwidget)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.verticalLayout.addWidget(self.line_2)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.btn_reload = QtGui.QPushButton(self.centralwidget)
self.btn_reload.setObjectName(_fromUtf8("btn_reload"))
self.horizontalLayout_7.addWidget(self.btn_reload)
self.btn_write = QtGui.QPushButton(self.centralwidget)
self.btn_write.setObjectName(_fromUtf8("btn_write"))
self.horizontalLayout_7.addWidget(self.btn_write)
self.verticalLayout.addLayout(self.horizontalLayout_7)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.start_charge_slider, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.start_charge_spinbox.setValue)
QtCore.QObject.connect(self.stop_charge_slider, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.stop_charge_spinbox.setValue)
QtCore.QObject.connect(self.start_charge_spinbox, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.start_charge_slider.setValue)
QtCore.QObject.connect(self.stop_charge_spinbox, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.stop_charge_slider.setValue)
QtCore.QObject.connect(self.inhibit_charge_slider, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.inhibit_charge_spinbox.setValue)
QtCore.QObject.connect(self.inhibit_charge_spinbox, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.inhibit_charge_slider.setValue)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.label.setText(_translate("MainWindow", "Battery:", None))
self.batteryComboBox.setItemText(0, _translate("MainWindow", "BAT0", None))
self.batteryComboBox.setItemText(1, _translate("MainWindow", "BAT1", None))
self.label_2.setText(_translate("MainWindow", "Battery Installed:", None))
self.installed.setText(_translate("MainWindow", "N/A", None))
self.label_25.setText(_translate("MainWindow", "AC Connected:", None))
self.ac_connected.setText(_translate("MainWindow", "N/A", None))
self.label_3.setText(_translate("MainWindow", "State:", None))
self.state.setText(_translate("MainWindow", "N/A", None))
self.label_4.setText(_translate("MainWindow", "Cycle Count:", None))
self.cycle_count.setText(_translate("MainWindow", "N/A", None))
self.label_5.setText(_translate("MainWindow", "Current Now:", None))
self.current_now.setText(_translate("MainWindow", "N/A", None))
self.label_6.setText(_translate("MainWindow", "Current Avg.:", None))
self.current_avg.setText(_translate("MainWindow", "N/A", None))
self.label_7.setText(_translate("MainWindow", "Power Now:", None))
self.power_now.setText(_translate("MainWindow", "N/A", None))
self.label_8.setText(_translate("MainWindow", "Power Avg.:", None))
self.power_avg.setText(_translate("MainWindow", "N/A", None))
self.label_9.setText(_translate("MainWindow", "Last Full Capacity:", None))
self.last_full_capacity.setText(_translate("MainWindow", "N/A", None))
self.label_10.setText(_translate("MainWindow", "Remaining Percent:", None))
self.remaining_percent.setText(_translate("MainWindow", "N/A", None))
self.label_11.setText(_translate("MainWindow", "Rem. Running Time:", None))
self.remaining_running_time.setText(_translate("MainWindow", "N/A", None))
self.label_12.setText(_translate("MainWindow", "Rem. Charge Time:", None))
self.remaining_charge_time.setText(_translate("MainWindow", "N/A", None))
self.label_13.setText(_translate("MainWindow", "Remaining Capacity:", None))
self.remaining_capacity.setText(_translate("MainWindow", "N/A", None))
self.label_24.setText(_translate("MainWindow", "Temperature:", None))
self.label_14.setText(_translate("MainWindow", "Design Capacity:", None))
self.label_22.setText(_translate("MainWindow", "Manufacturing Date:", None))
self.label_20.setText(_translate("MainWindow", "Chemistry:", None))
self.label_17.setText(_translate("MainWindow", "Manufacturer:", None))
self.label_16.setText(_translate("MainWindow", "Design Voltage:", None))
self.label_18.setText(_translate("MainWindow", "Model:", None))
self.label_23.setText(_translate("MainWindow", "First Use Date:", None))
self.label_21.setText(_translate("MainWindow", "Serial:", None))
self.label_19.setText(_translate("MainWindow", "Barcoding:", None))
self.label_15.setText(_translate("MainWindow", "Voltage:", None))
self.design_capacity.setText(_translate("MainWindow", "N/A", None))
self.manufacturing_date.setText(_translate("MainWindow", "N/A", None))
self.chemistry.setText(_translate("MainWindow", "N/A", None))
self.manufacturer.setText(_translate("MainWindow", "N/A", None))
self.design_voltage.setText(_translate("MainWindow", "N/A", None))
self.model.setText(_translate("MainWindow", "N/A", None))
self.first_use_date.setText(_translate("MainWindow", "N/A", None))
self.serial.setText(_translate("MainWindow", "N/A", None))
self.barcoding.setText(_translate("MainWindow", "N/A", None))
self.voltage.setText(_translate("MainWindow", "N/A", None))
self.temperature.setText(_translate("MainWindow", "N/A", None))
self.groupBox.setTitle(_translate("MainWindow", "Charging Options", None))
self.label_49.setText(_translate("MainWindow", "Start Charge Thresh:", None))
self.label_50.setText(_translate("MainWindow", "Stop Charge Thresh:", None))
self.label_26.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-family:\'sans-serif\'; font-size:8pt; font-weight:600; color:#252525; background-color:#d5f0d0;\">Hint:</span><span style=\" font-family:\'sans-serif\'; font-size:8pt; color:#000000; background-color:#d5f0d0;\"/><span style=\" font-family:\'sans-serif\'; font-size:8pt; color:#252525;\">Battery charging thresholds can be used to keep Li-Ion and Li-Polymer batteries partially charged, in order to </span><a href=\"http://www.thinkwiki.org/wiki/Maintenance#Battery_treatment\"><span style=\" font-size:8pt; text-decoration: underline; color:#0000ff;\">increase their lifetime</span></a><span style=\" font-family:\'sans-serif\'; font-size:8pt; color:#252525;\">.</span></p></body></html>", None))
self.label_27.setText(_translate("MainWindow", "Inhibit Charge (min.): ", None))
self.label_28.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-family:\'sans-serif\'; font-size:8pt; font-weight:600; color:#252525; background-color:#d5f0d0;\">Hint:</span><span style=\" font-family:\'sans-serif\'; font-size:8pt; color:#252525;\">Charge inhibiting can be used to reduce the power draw of the laptop, in order to use an under-spec power supply that can\'t handle the combined power draw of running and charging. It can also be used to control which battery is charged when </span><a href=\"http://www.thinkwiki.org/wiki/How_to_use_UltraBay_batteries\"><span style=\" font-size:8pt; text-decoration: underline; color:#0000ff;\">using an Ultrabay battery</span></a><span style=\" font-family:\'sans-serif\'; font-size:8pt; color:#252525;\">.</span></p></body></html>", None))
self.btn_reload.setText(_translate("MainWindow", "Reload Settings", None))
self.btn_write.setText(_translate("MainWindow", "Write Settings", None))
|
mit
| 4,475,475,535,524,859,400
| 66.358852
| 825
| 0.707949
| false
| 3.637726
| false
| false
| false
|
brunoabud/ic
|
gui/vertical_scroll_area.py
|
1
|
1965
|
# coding: utf-8
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtGui import QScrollArea
from PyQt4.QtCore import QSize, QEvent
class VerticalScrollArea(QScrollArea):
"""QScrollArea optimized for a vertical Scrolling Area."""
def __init__(self, *args, **kwargs):
super(VerticalScrollArea, self).__init__(*args, **kwargs)
self.setMinimumSize(QSize(100, 100))
def _updateWidth(self):
total_width = self.widget().minimumSizeHint().width()
total_width += self.frameWidth()
total_width += self.verticalScrollBar().sizeHint().width() if self.verticalScrollBar().isVisible() else 0
self.setMinimumWidth(total_width)
self.setMaximumWidth(total_width)
def setWidget(self, widget):
if self.widget() is not None:
self.widget().removeEventFilter(self)
widget.installEventFilter(self)
super(VerticalScrollArea, self).setWidget(widget)
def eventFilter(self, obj, event):
if obj is self.widget() and (event.type() == QEvent.Resize or
event.type() == QEvent.ChildAdded or
event.type() == QEvent.ChildRemoved):
self._updateWidth()
return False
def resizeEvent(self, event):
self._updateWidth()
super(VerticalScrollArea, self).resizeEvent(event)
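# A minimal usage sketch (assuming PyQt4 and a display are available): wrap a
# tall form-like widget so only vertical scrolling is needed and the scroll
# area width always fits the content.
if __name__ == "__main__":
    from PyQt4.QtGui import QApplication, QWidget, QVBoxLayout, QLabel
    app = QApplication([])
    content = QWidget()
    layout = QVBoxLayout(content)
    for i in range(50):
        layout.addWidget(QLabel("Row %d" % i))
    area = VerticalScrollArea()
    area.setWidget(content)  # installs the event filter and locks the width
    area.show()
    app.exec_()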
|
gpl-3.0
| 2,268,670,650,056,277,000
| 40.765957
| 113
| 0.695364
| false
| 3.910359
| false
| false
| false
|
explosion/ml-datasets
|
ml_datasets/loaders/dbpedia.py
|
1
|
1090
|
from pathlib import Path
import csv
import random
from ..util import get_file
from .._registry import register_loader
# DBPedia Ontology from https://course.fast.ai/datasets
DBPEDIA_ONTOLOGY_URL = "https://s3.amazonaws.com/fast-ai-nlp/dbpedia_csv.tgz"
@register_loader("dbpedia")
def dbpedia(loc=None, *, train_limit=0, dev_limit=0):
if loc is None:
loc = get_file("dbpedia_csv", DBPEDIA_ONTOLOGY_URL, untar=True, unzip=True)
train_loc = Path(loc) / "train.csv"
test_loc = Path(loc) / "test.csv"
return (
read_dbpedia_ontology(train_loc, limit=train_limit),
read_dbpedia_ontology(test_loc, limit=dev_limit),
)
def read_dbpedia_ontology(data_file, *, limit=0):
examples = []
with open(data_file, newline="", encoding="utf-8") as f:
reader = csv.reader(f)
for row in reader:
label = row[0]
title = row[1]
text = row[2]
examples.append((title + "\n" + text, label))
random.shuffle(examples)
if limit >= 1:
examples = examples[:limit]
return examples
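# A minimal usage sketch (assuming the package is run as a module, e.g.
# `python -m ml_datasets.loaders.dbpedia`, so the relative imports resolve;
# the limits below are arbitrary values). The first call downloads the csv
# archive through get_file.
if __name__ == "__main__":
    train, dev = dbpedia(train_limit=1000, dev_limit=200)
    print(len(train), len(dev))
    text, label = train[0]
    print(label, text.splitlines()[0])  # the first line of `text` is the title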
|
mit
| -6,058,549,363,968,170,000
| 28.459459
| 83
| 0.62844
| false
| 3.087819
| false
| false
| false
|
asadoughi/python-neutronclient
|
neutronclient/neutron/v2_0/vpn/vpnservice.py
|
1
|
3371
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
#
import logging
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.openstack.common.gettextutils import _
class ListVPNService(neutronv20.ListCommand):
"""List VPNService configurations that belong to a given tenant."""
resource = 'vpnservice'
log = logging.getLogger(__name__ + '.ListVPNService')
list_columns = [
'id', 'name', 'router_id', 'status'
]
_formatters = {}
pagination_support = True
sorting_support = True
class ShowVPNService(neutronv20.ShowCommand):
"""Show information of a given VPNService."""
resource = 'vpnservice'
log = logging.getLogger(__name__ + '.ShowVPNService')
class CreateVPNService(neutronv20.CreateCommand):
"""Create a VPNService."""
resource = 'vpnservice'
log = logging.getLogger(__name__ + '.CreateVPNService')
def add_known_arguments(self, parser):
parser.add_argument(
'--admin-state-down',
dest='admin_state', action='store_false',
help=_('Set admin state up to false'))
parser.add_argument(
'--name',
help=_('Set a name for the vpnservice'))
parser.add_argument(
'--description',
help=_('Set a description for the vpnservice'))
parser.add_argument(
'router', metavar='ROUTER',
help=_('Router unique identifier for the vpnservice'))
parser.add_argument(
'subnet', metavar='SUBNET',
help=_('Subnet unique identifier for the vpnservice deployment'))
def args2body(self, parsed_args):
_subnet_id = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'subnet',
parsed_args.subnet)
_router_id = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'router',
parsed_args.router)
body = {self.resource: {'subnet_id': _subnet_id,
'router_id': _router_id,
'admin_state_up': parsed_args.admin_state}, }
neutronv20.update_dict(parsed_args, body[self.resource],
['name', 'description',
'tenant_id'])
return body
class UpdateVPNService(neutronv20.UpdateCommand):
"""Update a given VPNService."""
resource = 'vpnservice'
log = logging.getLogger(__name__ + '.UpdateVPNService')
class DeleteVPNService(neutronv20.DeleteCommand):
"""Delete a given VPNService."""
resource = 'vpnservice'
log = logging.getLogger(__name__ + '.DeleteVPNService')
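# A rough sketch (placeholder values, not real identifiers) of the request
# body that CreateVPNService.args2body assembles before sending it to Neutron:
#
#   {'vpnservice': {'subnet_id': '<subnet-uuid>',
#                   'router_id': '<router-uuid>',
#                   'admin_state_up': True,
#                   'name': 'myvpn',          # only when --name is given
#                   'description': '...'}}    # only when --description is given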
|
apache-2.0
| 8,309,112,606,520,467,000
| 33.050505
| 78
| 0.627707
| false
| 4.051683
| false
| false
| false
|
Brian151/OpenShockwave
|
tools/imports/shockabsorber/model/cast.py
|
1
|
1698
|
class CastLibraryTable: #------------------------------
def __init__(self, castlibs):
self.by_nr = {}
self.by_assoc_id = {}
for cl in castlibs:
self.by_nr[cl.nr] = cl
if cl.assoc_id>0:
self.by_assoc_id[cl.assoc_id] = cl
def iter_by_nr(self):
return self.by_nr.itervalues()
def get_cast_library(self, lib_nr):
return self.by_nr[lib_nr]
def get_cast_member(self, lib_nr, member_nr):
        cast_lib = self.by_nr.get(lib_nr)
        return cast_lib.get_cast_member(member_nr) if cast_lib is not None else None
#--------------------------------------------------
class CastLibrary: #------------------------------
def __init__(self, nr, name, path, assoc_id, idx_range, self_idx):
self.nr = nr
self.name = name
self.path = path
self.assoc_id = assoc_id
self.idx_range = idx_range
self.self_idx = self_idx
self.castmember_table = None
def __repr__(self):
return "<CastLibrary #%d name=\"%s\" size=%d>" % (self.nr, self.name,
len(self.castmember_table) if self.castmember_table != None else -1)
def get_path(self): return self.path
def castmember_table_is_set(self): return self.castmember_table != None
def get_castmember_table(self): return self.castmember_table
def set_castmember_table(self,table):
self.castmember_table = table
def get_cast_member(self, member_nr):
if self.castmember_table == None: return None # TODO: Ensure loaded
return self.castmember_table[member_nr-1]
#--------------------------------------------------
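# A minimal usage sketch (the library attributes below are made-up values):
# register one library in a table and resolve a member by its 1-based number.
if __name__ == "__main__":
    lib = CastLibrary(nr=1, name="internal", path=None, assoc_id=1,
                      idx_range=(0, 0), self_idx=0)
    lib.set_castmember_table(["member-1", "member-2"])
    table = CastLibraryTable([lib])
    print(table.get_cast_library(1))
    print(table.get_cast_member(1, 2))  # -> "member-2"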
|
apache-2.0
| 846,849,474,389,359,700
| 36.733333
| 126
| 0.526502
| false
| 3.675325
| false
| false
| false
|
rande/python-element
|
element/plugins/seo/seo.py
|
1
|
3479
|
#
# Copyright 2014 Thomas Rabaix <thomas.rabaix@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import element.node
from ioc.helper import deepcopy
class SeoPage(object):
def __init__(self, title_pattern="%s", metas=None, keywords=None):
self.title_pattern = title_pattern
self.metas = metas or {}
self.keywords = keywords or []
class SeoListener(object):
def __init__(self, seo_page):
self.seo_page = seo_page
def listener(self, event):
"""
listen to element.seo.headers event and return a node with seo information only
subject should be a NodeContext object
"""
if not event.has('subject'):
return
node = element.node.Node('seo://%s' % event.get('subject').id, {
'type': 'seo.headers',
'seo': self.build_seo(event.get('subject')),
})
event.set('node', node)
def build_seo(self, context):
"""
        build the seo information from the provided context
"""
seo = {
'title': None,
'metas': {}
}
self.configure_title(context, seo)
self.configure_metas(context, seo)
return seo
def get_title(self, title):
return self.seo_page.title_pattern % title
def configure_title(self, context, seo):
if 'seo' in context.settings and 'title' in context.settings['seo']:
seo['title'] = self.get_title(context.settings['seo']['title'])
return
for field in ['title', 'name']:
if context[field]:
seo['title'] = self.get_title(context[field])
return
# no title defined!
seo['title'] = self.get_title(u"\u2605")
def configure_metas(self, context, seo):
if 'seo' not in context.settings or 'metas' not in context.settings['seo']:
seo['metas'] = deepcopy(self.seo_page.metas)
return
if 'metas' in context.settings['seo']:
seo['metas'] = deepcopy(context.settings['seo']['metas'])
for pname, pmetas in deepcopy(self.seo_page.metas).iteritems():
if pname not in seo['metas']:
seo['metas'][pname] = pmetas
continue
# merge values
for mname, mvalue in pmetas.iteritems():
if mname not in seo['metas'][pname]:
seo['metas'][pname][mname] = mvalue
class SeoHandler(element.node.NodeHandler):
def __init__(self, templating):
self.templating = templating
def get_defaults(self, node):
return {
'template': 'element.plugins.seo:headers.html'
}
def get_name(self):
return 'Seo'
def execute(self, request_handler, context):
return self.render(request_handler, self.templating, context.settings['template'], {
'context': context,
'seo': context.seo
})
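# A minimal usage sketch (assuming the element and ioc packages are importable;
# the pattern and metas are example values): SeoPage carries the site-wide
# defaults and SeoListener applies the title pattern to a page title.
if __name__ == "__main__":
    page = SeoPage(title_pattern="%s | My Site",
                   metas={"name": {"robots": "index, follow"}})
    listener = SeoListener(page)
    print(listener.get_title("Home"))  # -> "Home | My Site"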
|
apache-2.0
| -5,339,116,559,207,831,000
| 30.342342
| 92
| 0.5904
| false
| 3.835722
| false
| false
| false
|
Kongsea/tensorflow
|
tensorflow/contrib/model_pruning/python/pruning_test.py
|
1
|
6703
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"do_not_prune=[conv1,conv2]", "sparsity_function_end_step=100",
"target_sparsity=0.9"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.Variable(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertAllEqual(p._spec.do_not_prune, ["conv1", "conv2"])
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
def testInitWithExternalSparsity(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.test_session():
weights = variables.Variable(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.test_session() as session:
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.5, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.test_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.Variable(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.00, name="sparsity")
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.test_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
# Weights pruned at steps 0,2,4,and,6
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
if __name__ == "__main__":
test.main()
|
apache-2.0
| -7,917,016,903,297,070,000
| 40.376543
| 80
| 0.687453
| false
| 3.529753
| true
| false
| false
|
praekelt/go-contacts-api
|
go_contacts/handlers/contacts_for_group.py
|
1
|
1590
|
from cyclone.web import HTTPError
from go_api.cyclone.handlers import BaseHandler
from go_api.collections.errors import (
CollectionUsageError, CollectionObjectNotFound)
from twisted.internet.defer import maybeDeferred
class ContactsForGroupHandler(BaseHandler):
"""
Handler for getting all contacts for a group
Methods supported:
* ``GET /:group_id/contacts`` - retrieve all contacts of a group.
"""
route_suffix = ":group_id/contacts"
model_alias = "collection"
def get(self, group_id):
query = self.get_argument('query', default=None)
stream = self.get_argument('stream', default='false')
if stream == 'true':
d = maybeDeferred(self.collection.stream, group_id, query)
d.addCallback(self.write_queue)
else:
cursor = self.get_argument('cursor', default=None)
max_results = self.get_argument('max_results', default=None)
try:
max_results = max_results and int(max_results)
except ValueError:
raise HTTPError(400, "max_results must be an integer")
d = maybeDeferred(
self.collection.page, group_id, cursor=cursor,
max_results=max_results, query=query)
d.addCallback(self.write_page)
d.addErrback(self.catch_err, 404, CollectionObjectNotFound)
d.addErrback(self.catch_err, 400, CollectionUsageError)
d.addErrback(
self.raise_err, 500,
"Failed to retrieve contacts for group %r." % group_id)
return d
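# A rough sketch of the requests this handler serves (parameter names are the
# ones read in get() above; host and group id are placeholders):
#
#   GET /<group_id>/contacts?stream=true                          -> streamed contacts
#   GET /<group_id>/contacts?query=...&cursor=...&max_results=20  -> a single page
#
# max_results must parse as an integer, otherwise the handler raises a 400.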
|
bsd-3-clause
| 5,880,230,675,900,789,000
| 35.976744
| 72
| 0.633962
| false
| 4.140625
| false
| false
| false
|
sdague/gatemine
|
gatemine/results.py
|
1
|
1881
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DEFAULT_PAGE_SIZE = 100
class ESQuery(object):
"""An encapsulation of an elastic search query"""
query = ""
pagesize = DEFAULT_PAGE_SIZE
page = 0
def __init__(self, query, size=DEFAULT_PAGE_SIZE):
self.query = query
self.pagesize = size
class ResultSet(object):
"""A container for results"""
results = []
pagesize = DEFAULT_PAGE_SIZE
page = 0
took = 0
timed_out = False
size = 0
def __init__(self, data):
self.results = []
self._parse(data)
def _parse(self, data):
self.took = data['took']
self.timed_out = data['timed_out']
self.size = data['hits']['total']
for r in data['hits']['hits']:
self.results.append(Result(r))
def next(self, pagesize=None):
"""Eventually used to load the next page of results"""
pass
def __iter__(self):
return iter(self.results)
class Result(object):
"""A single log stash result"""
def __init__(self, data):
self._data = data
def __str__(self):
return str(self._data)
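# A minimal usage sketch with a hand-built Elasticsearch-style response (the
# keys mirror what ResultSet._parse reads; the hit payloads are made up).
if __name__ == "__main__":
    data = {
        "took": 5,
        "timed_out": False,
        "hits": {
            "total": 2,
            "hits": [{"_id": "1", "_source": {"msg": "a"}},
                     {"_id": "2", "_source": {"msg": "b"}}],
        },
    }
    rs = ResultSet(data)
    print("%d hits in %dms" % (rs.size, rs.took))
    for result in rs:
        print(result)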
|
apache-2.0
| -6,920,318,627,922,871,000
| 26.26087
| 78
| 0.636364
| false
| 3.90249
| false
| false
| false
|
nealp9084/hw3
|
negotiator_framework.py
|
1
|
6109
|
from csv import DictReader
import csv
from sys import argv, exit
# from negotiator import Negotiator
# from door_in_face_negotiator import DoorInFaceNegotiator
# from greedy_negotiator import GreedyNegotiator
# from door_in_face_dummy import DIFDummyNegotiator
# from negotiator_a import Negotiator_A
from random import seed, randint
from nap7jz import Negotiator as Nap7jz
# read_scenario(parameterfile_name : String) --> (int, list(dict))
# Utility function to read in a single scenario from a csv file
# Expects a single int on the first line, specifying the iteration limit,
# and then an arbitrary number of rows of three comma-separated columns,
# specifying the name of each item, its rank (where 1 is best) for negotiator A,
# and the same for negotiator B
def read_scenario(parameterfile_name):
# Open the file for reading
with open(parameterfile_name, 'r') as parameterfile:
# Consume the first line, getting the iteration limit
number_iterations = parameterfile.readline()
return (
int(number_iterations),
# Use Python's builtin CSV reader to read the rest of the file as specified
list(DictReader(parameterfile, fieldnames=["item_name", "negotiator_a", "negotiator_b"]))
)
# negotiate(num_iterations : Int, negotiator_a : BaseNegotiator, negotiator_b : BaseNegotiator) --> (Boolean, list(String), Int)
# The main negotiation function, responsible for running a single scenario & coordinating interactions between the two
# negotiators.
def negotiate(num_iterations, negotiator_a, negotiator_b):
# Get the initial offer from negotiator a - we pass in None to signify that no previous opposing offers have been made
(offer_a, offer_b) = (negotiator_a.make_offer(None), None)
# We scale the reported utility by a random factor
a_scale = randint(1, 11)
b_scale = randint(1, 11)
#print("scales are %f %f" % (a_scale, b_scale))
# Keep trading offers until we reach an agreement or the iteration limit, whichever comes first
for i in range(num_iterations):
print(offer_a, offer_b)
# Get from each negotiator the utility it received from the offer it most recently gave
utility = (a_scale * negotiator_a.utility(), b_scale * negotiator_b.utility())
# Send b the latest offer from a and allow it to rebut
negotiator_b.receive_utility(utility[0])
offer_b = negotiator_b.make_offer(offer_a)
# We signify agreement by both offers being structurally equal
if offer_a == offer_b:
return (True, offer_a, i)
# If we didn't agree, let a respond to b's offer, recalculate utility and send 'a' the info
utility = (a_scale * negotiator_a.utility(), b_scale * negotiator_b.utility())
negotiator_a.receive_utility(utility[1])
offer_a = negotiator_a.make_offer(offer_b)
if offer_a == offer_b:
return (True, offer_a, i)
# If we failed overall, then there's no ordering to return
return (False, None, num_iterations)
if __name__ == "__main__":
# We can't run without at least one scenario. We can, however, run with multiple provided scenarios
if len(argv) < 2:
print("Please provide at least one scenario file, in csv format.")
exit(-42)
score_a = score_b = 0
# We will replace Negotiator here with <your id>_Negotiator, as specified in the Readme
negotiator_a = Nap7jz()
negotiator_b = Nap7jz()
count = randint(0,1)
for scenario in argv[1:]:
# Get the scenario parameters
(num_iters, mapping) = read_scenario(scenario)
# Separate the mapping out for each negotiator, and sort the items from it into a list
# based upon the preferences of each negotiator
a_mapping = {item["item_name"] : int(item["negotiator_a"]) for item in mapping}
a_order = sorted(a_mapping, key=a_mapping.get, reverse=True)
b_mapping = {item["item_name"] : int(item["negotiator_b"]) for item in mapping}
b_order = sorted(b_mapping, key=b_mapping.get, reverse=True)
# Give each negotiator their preferred item ordering
negotiator_a.initialize(a_order, num_iters)
negotiator_b.initialize(b_order, num_iters)
# Get the result of the negotiation and SWAP TURNS
if count%2 == 0:
print("A (random) is going first")
print("A's prefs: " + str(negotiator_a.preferences))
print("B's prefs: " + str(negotiator_b.preferences))
(result, order, count) = negotiate(num_iters, negotiator_a, negotiator_b)
else:
print("B (you) going first (so your offers are the first column")
print("A's prefs: " + str(negotiator_a.preferences))
print("B's prefs: " + str(negotiator_b.preferences))
(result, order, count) = negotiate(num_iters, negotiator_b, negotiator_a)
        # Assign points to each negotiator. Note that if the negotiation failed, each negotiator receives a negative penalty
# However, it is also possible in a "successful" negotiation for a given negotiator to receive negative points
(points_a, points_b) = (negotiator_a.utility(), negotiator_b.utility()) if result else (-len(a_order), -len(b_order))
results = (result, points_a, points_b, count)
score_a += points_a
score_b += points_b
# Update each negotiator with the final result, points assigned, and number of iterations taken to reach an agreement
negotiator_a.receive_results(results)
negotiator_b.receive_results(results)
print("{} negotiation:\n\tNegotiator A: {}\n\tNegotiator B: {}".format("Successful" if result else "Failed", points_a, points_b))
#swap turns.
count = count + 1
print("Final result:\n\tNegotiator A (random one): {}\n\tNegotiator B: (us) {}".format(score_a, score_b))
|
mit
| -8,557,401,489,509,834,000
| 50.336134
| 141
| 0.650188
| false
| 3.825297
| false
| false
| false
|
cherishing78/BSVer
|
Yintest/noloop.py
|
1
|
3693
|
import numpy as np
def Initial_diag(dim):
conv=np.diag(np.random.rand(dim))
return conv
def Convergence(matrix):
delta=(np.abs(matrix).max(axis=0)).max(axis=0)
return delta
def Train(trainingset,label):
(imagenum,dim)=trainingset.shape
    #Each column vector stands for an image.
dataset=np.transpose(trainingset)
label.shape=(-1,)
peoplenum=label[-1]
m=np.zeros(peoplenum,dtype=np.uint16)
    #m[i] stands for the number of images the i-th person has.
#The elements in label start with 1.
for i in label:
m[i-1]+=1
#Delete the repetitive elements and get the m_set list.
m_set=set(list(m))
m_max=max(m_set)
print '------ m_set Accomplished ------'
print m_set
#Initialization
Su=Initial_diag(dim)
Se=Initial_diag(dim)
print '------ Initialization Accomplished ------'
#Iteration
epsilon=1e-4
Delta_Su=Su
Delta_Se=Se
Iter=0
Delta=max(Convergence(Delta_Su),Convergence(Delta_Se))
print '------ Training Process ------'
while Delta>epsilon:
print '------ Delta=%f in %dth Iteration------'%(Delta,Iter)
#Compute the F and all kinds of G in each iteration time.
F=np.linalg.pinv(Se)
#In case there is no people has m[k] images.
G_class=[0 for i in range(m_max)]
for i in range(1,m_max+1):
if i in m_set:
#Compute various G in advance for the sake of convenience.
G_class[i-1]=-np.dot(np.linalg.pinv((i+1)*Su+Se),np.dot(Su,F))
print '------ G_class[%d] Accopmlished in the %dth Iteration ------'%(i-1,Iter)
#Compute u[i] for each person and e[i,j] for each image.
#Initialize the Pointer of each person's images.
m_index=0
Su_new=0
Se_new=0
        print '------ Compute the Su_new and Se_new in %dth Iteration ------'%Iter
for i in range(peoplenum):
u=0
e=0
#Compute the constant term for e[i,j].
constant=0
for j in range(m_index,m_index+m[i]):
constant+=np.dot(Se,np.dot(G_class[m[i]-1],dataset[:,j]))
#Compute the Su_new and Se_new
for j in range(m_index,m_index+m[i]):
u+=np.dot(Su,np.dot((F+(m[i]+1)*G_class[m[i]-1]),dataset[:,j]))
eij=np.dot(Se,dataset[:,j])+constant
Se_new+=np.dot(eij,np.transpose(eij))/m[i]/peoplenum
Su_new+=np.dot(u,np.transpose(u))/peoplenum
#Pointer move on.
m_index+=m[i]
Delta_Su=Su_new.__sub__(Su)
Delta_Se=Se_new.__sub__(Se)
Delta=max(Convergence(Delta_Su),Convergence(Delta_Se))
Su=Su_new
Se=Se_new
        print '------ %dth iteration accomplished ------'%Iter
Iter+=1
if Iter>10:
break
#Get the matrix in need.
F=np.linalg.pinv(Se)
#Save the memory.
if 1 not in m_set:
G_class[0]=-np.dot(np.dot(np.linalg.pinv(2*Su+Se),Su),F)
A=np.linalg.pinv(Su+Se)-F-G_class[0]
    return A, G_class[0]
def Noloop(trainingset,label):
(imagenum,dim)=trainingset.shape
    #Each column vector stands for an image.
    #For dimension alignment.
trainingset.shape=(imagenum,dim,1)
label.shape=(-1,)
peoplenum=label[-1]
m=np.zeros(peoplenum,dtype=np.uint16)
    #m[i] stands for the number of images the i-th person has.
#The elements in label start with 1.
for i in label:
m[i-1]+=1
#Delete the repetitive elements and get the m_set list.
m_set=set(list(m))
m_max=max(m_set)
print '------ m_set Accomplished ------'
print m_set
m_index=0
print '------ Compute Su ------'
Su=0
Se=0
for i in range(peoplenum):
u=0
e=0
for j in range(m_index,m_index+m[i]):
u+=trainingset[j]/m[i]
for j in range(m_index,m_index+m[i]):
Se+=np.dot((trainingset[j]-u),np.transpose(trainingset[j]-u))/m[i]/(peoplenum-1)
Su+=np.dot(u,np.transpose(u))/(peoplenum-1)
return Su,Se
def Verify(A,G,x1,x2):
x1.shape=(-1,1)
x2.shape=(-1,1)
ratio=np.dot(np.dot(np.transpose(x1),A),x1)+np.dot(np.dot(np.transpose(x2),A),x2)-2*np.dot(np.dot(np.transpose(x1),G),x2)
return ratio
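#A minimal usage sketch with synthetic data (shapes only, the values are random
#and carry no meaning): Noloop expects one row per image and a 1-based label
#vector that ends with the largest person id.
if __name__ == '__main__':
    dim = 4
    trainingset = np.random.rand(6, dim)
    label = np.array([1, 1, 1, 2, 2, 2])
    Su, Se = Noloop(trainingset, label)
    print Su.shape, Se.shape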
|
bsd-3-clause
| 7,908,429,308,505,981,000
| 29.02439
| 122
| 0.655835
| false
| 2.365791
| false
| false
| false
|
psychopy/psychopy
|
psychopy/experiment/components/joyButtons/virtualJoyButtons.py
|
1
|
1281
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
# Support for fake joystick/gamepad during development
# if no 'real' joystick/gamepad is available use keyboard emulation
# 'ctrl' + 'alt' + numberKey
from __future__ import absolute_import, division, print_function
from psychopy import event
class VirtualJoyButtons(object):
def __init__(self, device_number):
self.device_number = device_number
self.numberKeys=['0','1','2','3','4','5','6','7','8','9']
self.modifierKeys=['ctrl','alt']
self.mouse = event.Mouse()
event.Mouse(visible=False)
def getNumButtons(self):
return(len(self.numberKeys))
def getAllButtons(self):
keys = event.getKeys(keyList=self.numberKeys, modifiers=True)
values = [key for key, modifiers in keys if all([modifiers[modKey] for modKey in self.modifierKeys])]
self.state = [key in values for key in self.numberKeys]
mouseButtons = self.mouse.getPressed()
self.state[:len(mouseButtons)] = [a or b != 0 for (a,b) in zip(self.state, mouseButtons)]
return(self.state)
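# A minimal usage sketch (assumes PsychoPy can open a window so keyboard and
# mouse events are collected; the device number is arbitrary for the virtual
# device and the polling loop below is just an example).
if __name__ == "__main__":
    from psychopy import visual, core
    win = visual.Window(size=(400, 300))
    buttons = VirtualJoyButtons(device_number=0)
    for _ in range(300):  # poll for roughly five seconds
        state = buttons.getAllButtons()
        if any(state):
            print(state)
        core.wait(1.0 / 60)
    win.close()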
|
gpl-3.0
| -8,302,396,992,503,322,000
| 39.03125
| 109
| 0.67057
| false
| 3.519231
| false
| false
| false
|
deepmind/reverb
|
reverb/client_test.py
|
1
|
17392
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for python client."""
import collections
import multiprocessing.dummy as multithreading
import pickle
from absl.testing import absltest
import numpy as np
from reverb import client
from reverb import errors
from reverb import item_selectors
from reverb import rate_limiters
from reverb import server
import tensorflow.compat.v1 as tf
import tree
TABLE_NAME = 'table'
NESTED_SIGNATURE_TABLE_NAME = 'nested_signature_table'
SIMPLE_QUEUE_NAME = 'simple_queue'
QUEUE_SIGNATURE = {
'a': tf.TensorSpec(dtype=tf.int64, shape=(3,)),
'b': tf.TensorSpec(dtype=tf.float32, shape=(3, 2, 2)),
}
class ClientTest(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.server = server.Server(
tables=[
server.Table(
name=TABLE_NAME,
sampler=item_selectors.Prioritized(1),
remover=item_selectors.Fifo(),
max_size=1000,
rate_limiter=rate_limiters.MinSize(3),
signature=tf.TensorSpec(dtype=tf.int64, shape=[]),
),
server.Table.queue(
name=NESTED_SIGNATURE_TABLE_NAME,
max_size=10,
signature=QUEUE_SIGNATURE,
),
server.Table.queue(SIMPLE_QUEUE_NAME, 10),
],
port=None)
cls.client = client.Client(f'localhost:{cls.server.port}')
def tearDown(self):
self.client.reset(TABLE_NAME)
self.client.reset(NESTED_SIGNATURE_TABLE_NAME)
self.client.reset(SIMPLE_QUEUE_NAME)
super().tearDown()
@classmethod
def tearDownClass(cls):
cls.server.stop()
super().tearDownClass()
def _get_sample_frequency(self, n=10000):
keys = [sample[0].info.key for sample in self.client.sample(TABLE_NAME, n)]
counter = collections.Counter(keys)
return [count / n for _, count in counter.most_common()]
def test_sample_sets_table_size(self):
for i in range(1, 11):
self.client.insert(i, {TABLE_NAME: 1.0})
if i >= 3:
sample = next(self.client.sample(TABLE_NAME, 1))[0]
self.assertEqual(sample.info.table_size, i)
def test_sample_sets_probability(self):
for i in range(1, 11):
self.client.insert(i, {TABLE_NAME: 1.0})
if i >= 3:
sample = next(self.client.sample(TABLE_NAME, 1))[0]
        self.assertAlmostEqual(sample.info.probability, 1.0 / i, delta=0.01)
def test_sample_sets_priority(self):
# Set the test context by manually mutating priorities to known ones.
for i in range(10):
self.client.insert(i, {TABLE_NAME: 1000.0})
def _sample_priorities(n=100):
return {
sample[0].info.key: sample[0].info.priority
for sample in self.client.sample(TABLE_NAME, n)
}
original_priorities = _sample_priorities(n=100)
self.assertNotEmpty(original_priorities)
self.assertSequenceAlmostEqual([1000.0] * len(original_priorities),
original_priorities.values())
expected_priorities = {
key: float(i) for i, key in enumerate(original_priorities)
}
self.client.mutate_priorities(TABLE_NAME, updates=expected_priorities)
# Resample and check priorities.
sampled_priorities = _sample_priorities(n=100)
self.assertNotEmpty(sampled_priorities)
for key, priority in sampled_priorities.items():
if key in expected_priorities:
self.assertAlmostEqual(expected_priorities[key], priority)
def test_insert_raises_if_priorities_empty(self):
with self.assertRaises(ValueError):
self.client.insert([1], {})
def test_insert(self):
self.client.insert(1, {TABLE_NAME: 1.0}) # This should be sampled often.
self.client.insert(2, {TABLE_NAME: 0.1}) # This should be sampled rarely.
self.client.insert(3, {TABLE_NAME: 0.0}) # This should never be sampled.
freqs = self._get_sample_frequency()
self.assertLen(freqs, 2)
self.assertAlmostEqual(freqs[0], 0.9, delta=0.05)
self.assertAlmostEqual(freqs[1], 0.1, delta=0.05)
def test_writer_raises_if_max_sequence_length_lt_1(self):
with self.assertRaises(ValueError):
self.client.writer(0)
def test_writer_raises_if_chunk_length_lt_1(self):
self.client.writer(2, chunk_length=1) # Should be fine.
for chunk_length in [0, -1]:
with self.assertRaises(ValueError):
self.client.writer(2, chunk_length=chunk_length)
def test_writer_raises_if_chunk_length_gt_max_sequence_length(self):
self.client.writer(2, chunk_length=1) # lt should be fine.
self.client.writer(2, chunk_length=2) # eq should be fine.
with self.assertRaises(ValueError):
self.client.writer(2, chunk_length=3)
def test_writer_raises_if_max_in_flight_items_lt_1(self):
self.client.writer(1, max_in_flight_items=1)
self.client.writer(1, max_in_flight_items=2)
self.client.writer(1, max_in_flight_items=None)
with self.assertRaises(ValueError):
self.client.writer(1, max_in_flight_items=-1)
def test_writer_works_with_no_retries(self):
# If the server responds correctly, the writer ignores the no retries arg.
writer = self.client.writer(2)
writer.append([0])
writer.create_item(TABLE_NAME, 1, 1.0)
writer.close(retry_on_unavailable=False)
def test_writer(self):
with self.client.writer(2) as writer:
writer.append([0])
writer.create_item(TABLE_NAME, 1, 1.0)
writer.append([1])
writer.create_item(TABLE_NAME, 2, 1.0)
writer.append([2])
writer.create_item(TABLE_NAME, 1, 1.0)
writer.append_sequence([np.array([3, 4])])
writer.create_item(TABLE_NAME, 2, 1.0)
freqs = self._get_sample_frequency()
self.assertLen(freqs, 4)
for freq in freqs:
self.assertAlmostEqual(freq, 0.25, delta=0.05)
def test_write_and_sample_different_shapes_and_dtypes(self):
trajectories = [
np.ones([], np.int64),
np.ones([2, 2], np.float32),
np.ones([3, 3], np.int32),
]
for trajectory in trajectories:
self.client.insert(trajectory, {SIMPLE_QUEUE_NAME: 1.0})
for i, [sample] in enumerate(self.client.sample(SIMPLE_QUEUE_NAME, 3)):
np.testing.assert_array_equal(trajectories[i], sample.data[0])
def test_mutate_priorities_update(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
before = self._get_sample_frequency()
self.assertLen(before, 3)
for freq in before:
self.assertAlmostEqual(freq, 0.33, delta=0.05)
key = next(self.client.sample(TABLE_NAME, 1))[0].info.key
self.client.mutate_priorities(TABLE_NAME, updates={key: 0.5})
after = self._get_sample_frequency()
self.assertLen(after, 3)
self.assertAlmostEqual(after[0], 0.4, delta=0.05)
self.assertAlmostEqual(after[1], 0.4, delta=0.05)
self.assertAlmostEqual(after[2], 0.2, delta=0.05)
def test_mutate_priorities_delete(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
before = self._get_sample_frequency()
self.assertLen(before, 4)
key = next(self.client.sample(TABLE_NAME, 1))[0].info.key
self.client.mutate_priorities(TABLE_NAME, deletes=[key])
after = self._get_sample_frequency()
self.assertLen(after, 3)
def test_reset(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
keys_before = set(
sample[0].info.key for sample in self.client.sample(TABLE_NAME, 1000))
self.assertLen(keys_before, 3)
self.client.reset(TABLE_NAME)
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
keys_after = set(
sample[0].info.key for sample in self.client.sample(TABLE_NAME, 1000))
self.assertLen(keys_after, 3)
self.assertTrue(keys_after.isdisjoint(keys_before))
def test_server_info(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
list(self.client.sample(TABLE_NAME, 1))
server_info = self.client.server_info()
self.assertLen(server_info, 3)
self.assertIn(TABLE_NAME, server_info)
table = server_info[TABLE_NAME]
self.assertEqual(table.current_size, 3)
self.assertEqual(table.num_unique_samples, 1)
self.assertEqual(table.max_size, 1000)
self.assertEqual(table.sampler_options.prioritized.priority_exponent, 1)
self.assertTrue(table.remover_options.fifo)
self.assertEqual(table.signature, tf.TensorSpec(dtype=tf.int64, shape=[]))
self.assertIn(NESTED_SIGNATURE_TABLE_NAME, server_info)
queue = server_info[NESTED_SIGNATURE_TABLE_NAME]
self.assertEqual(queue.current_size, 0)
self.assertEqual(queue.num_unique_samples, 0)
self.assertEqual(queue.max_size, 10)
self.assertTrue(queue.sampler_options.fifo)
self.assertTrue(queue.remover_options.fifo)
self.assertEqual(queue.signature, QUEUE_SIGNATURE)
self.assertIn(SIMPLE_QUEUE_NAME, server_info)
info = server_info[SIMPLE_QUEUE_NAME]
self.assertEqual(info.current_size, 0)
self.assertEqual(info.num_unique_samples, 0)
self.assertEqual(info.max_size, 10)
self.assertTrue(info.sampler_options.fifo)
self.assertTrue(info.remover_options.fifo)
self.assertIsNone(info.signature)
def test_sample_trajectory_with_signature(self):
with self.client.trajectory_writer(3) as writer:
for _ in range(3):
writer.append({
'a': np.ones([], np.int64),
'b': np.ones([2, 2], np.float32),
})
writer.create_item(
table=NESTED_SIGNATURE_TABLE_NAME,
priority=1.0,
trajectory={
'a': writer.history['a'][:],
'b': writer.history['b'][:],
})
sample = next(self.client.sample(NESTED_SIGNATURE_TABLE_NAME,
emit_timesteps=False,
unpack_as_table_signature=True))
    # The data should be unpacked as the structure of the table.
want = {
'a': np.ones([3], np.int64),
'b': np.ones([3, 2, 2], np.float32),
}
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_without_signature(self):
with self.client.trajectory_writer(3) as writer:
for _ in range(3):
writer.append({
'a': np.ones([], np.int64),
'b': np.ones([2, 2], np.float32),
})
writer.create_item(
table=SIMPLE_QUEUE_NAME,
priority=1.0,
trajectory={
'a': writer.history['a'][:],
'b': writer.history['b'][:],
})
sample = next(self.client.sample(SIMPLE_QUEUE_NAME,
emit_timesteps=False,
unpack_as_table_signature=True))
# The data should be flat as the table has no signature. Each element within
# the flat data should represent the entire column (i.e not just one step).
want = [np.ones([3], np.int64), np.ones([3, 2, 2], np.float32)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_as_flat_data(self):
with self.client.trajectory_writer(3) as writer:
for _ in range(3):
writer.append({
'a': np.ones([], np.int64),
'b': np.ones([2, 2], np.float32),
})
writer.create_item(
table=NESTED_SIGNATURE_TABLE_NAME,
priority=1.0,
trajectory={
'a': writer.history['a'][:],
'b': writer.history['b'][:],
})
sample = next(self.client.sample(NESTED_SIGNATURE_TABLE_NAME,
emit_timesteps=False,
unpack_as_table_signature=False))
# The table has a signature but we requested the data to be flat.
want = [np.ones([3], np.int64), np.ones([3, 2, 2], np.float32)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_written_with_insert(self):
self.client.insert(np.ones([3, 3], np.int32), {SIMPLE_QUEUE_NAME: 1.0})
sample = next(self.client.sample(SIMPLE_QUEUE_NAME,
emit_timesteps=False))
# An extra batch dimension should have been added to the inserted data as
# it is a trajectory of length 1.
want = [np.ones([1, 3, 3], np.int32)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_written_with_legacy_writer(self):
with self.client.writer(3) as writer:
for i in range(3):
writer.append([i, np.ones([2, 2], np.float64)])
writer.create_item(SIMPLE_QUEUE_NAME, 3, 1.0)
sample = next(self.client.sample(SIMPLE_QUEUE_NAME,
emit_timesteps=False))
# The time dimension should have been added to all fields.
want = [np.array([0, 1, 2]), np.ones([3, 2, 2], np.float64)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_server_info_timeout(self):
# Setup a client that doesn't actually connect to anything.
dummy_client = client.Client(f'localhost:{self.server.port + 1}')
with self.assertRaises(
errors.DeadlineExceededError,
msg='ServerInfo call did not complete within provided timeout of 1s'):
dummy_client.server_info(timeout=1)
def test_pickle(self):
loaded_client = pickle.loads(pickle.dumps(self.client))
self.assertEqual(loaded_client._server_address, self.client._server_address)
loaded_client.insert([0], {TABLE_NAME: 1.0})
def test_multithreaded_writer_using_flush(self):
# Ensure that we don't have any errors caused by multithreaded use of
# writers or clients.
pool = multithreading.Pool(64)
def _write(i):
with self.client.writer(1) as writer:
writer.append([i])
# Make sure that flush before create_item doesn't create trouble.
writer.flush()
writer.create_item(TABLE_NAME, 1, 1.0)
writer.flush()
for _ in range(5):
pool.map(_write, list(range(256)))
info = self.client.server_info()[TABLE_NAME]
self.assertEqual(info.current_size, 1000)
pool.close()
pool.join()
def test_multithreaded_writer_using_scope(self):
# Ensure that we don't have any errors caused by multithreaded use of
# writers or clients.
pool = multithreading.Pool(64)
def _write(i):
with self.client.writer(1) as writer:
writer.append([i])
writer.create_item(TABLE_NAME, 1, 1.0)
for _ in range(5):
pool.map(_write, list(range(256)))
info = self.client.server_info()[TABLE_NAME]
self.assertEqual(info.current_size, 1000)
pool.close()
pool.join()
def test_validates_trajectory_writer_config(self):
with self.assertRaises(ValueError):
self.client.trajectory_writer(0)
with self.assertRaises(ValueError):
self.client.trajectory_writer(-1)
if __name__ == '__main__':
absltest.main()
|
apache-2.0
| 5,739,390,927,420,527,000
| 35.308977
| 80
| 0.652196
| false
| 3.455593
| true
| false
| false
|
mauriciogtec/PropedeuticoDataScience2017
|
Alumnos/Leonardo_Marin/Tarea 2 {spyder}.py
|
1
|
2336
|
#######################################################################
### Part 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
#####################################################################
## Exercise 1
# Load the image
imagen = Image.open('C:/Users/Data Mining/Documents/ITAM/Propedeutico/Alumnos/PropedeuticoDataScience2017/Alumnos/Leonardo_Marin/black_and_white.jpg')
imagen_gris = imagen.convert('LA') ## Convert to grayscale
## Convert the image to a matrix
imagen_mat = np.array(list(imagen_gris.getdata(band=0)), float)
imagen_mat.shape = (imagen_gris.size[1], imagen_gris.size[0])
imagen_mat = np.matrix(imagen_mat)
plt.figure(figsize=(9, 6))
plt.imshow(imagen_mat, cmap='gray')
## Singular value decomposition
U, sigma, V = np.linalg.svd(imagen_mat)
## Test the visualization with the first n vectors
# n= 1
j = 1
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.figure(figsize=(9, 6))
plt.imshow(matriz_equivalente, cmap='gray')
# n = 5
j = 5
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.figure(figsize=(9, 6))
plt.imshow(matriz_equivalente, cmap='gray')
# n = 25
j = 25
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.figure(figsize=(9, 6))
plt.imshow(matriz_equivalente, cmap='gray')
# n = 50
j = 50
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.figure(figsize=(9, 6))
plt.imshow(matriz_equivalente, cmap='gray')
## We can see how the image can be reconstructed without using all of the information in the original matrix.
#####################################################################
## Exercise 2
A = np.array([[1,0],[1,2]])
A
def pseudoinversa(A):
    # np.linalg.svd returns V already transposed (Vh), so undo that here
    U, s, V = np.linalg.svd(A)
    D1 = np.dot(V.T, np.diag(1 / s))
    pseudoinv = np.dot(D1, U.T)
    return pseudoinv
B = pseudoinversa(A)
B
def sistema_ecuaciones(A,b):
    # Solves a system of equations; A is the matrix of equation coefficients, b is the (row) vector of results
x = np.dot(pseudoinversa(A),b.T)
return(x)
A = np.array([[1,0],[1,2]])
A
A.shape
b = np.array([[5,3]])
b
b.shape
## Test the function
sistema_ecuaciones(A,b)
##
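## A quick sanity check: the SVD-based pseudoinverse should match numpy's
## built-in np.linalg.pinv, and the solved system should reproduce b.
C = np.array([[1., 0.], [1., 2.]])
print(np.allclose(pseudoinversa(C), np.linalg.pinv(C)))
x = sistema_ecuaciones(C, b)
print(np.allclose(np.dot(C, x), b.T))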
|
mit
| 2,516,087,497,806,065,700
| 18.272727
| 150
| 0.606775
| false
| 2.562637
| false
| false
| false
|
nemesisdesign/openwisp2
|
openwisp_controller/vpn_backends.py
|
1
|
1648
|
from copy import deepcopy
from netjsonconfig import OpenVpn as BaseOpenVpn
# adapt OpenVPN schema in order to limit it to 1 item only
limited_schema = deepcopy(BaseOpenVpn.schema)
limited_schema['properties']['openvpn'].update(
{'additionalItems': False, 'minItems': 1, 'maxItems': 1}
)
# server mode only
limited_schema['properties']['openvpn']['items'].update(
{
'oneOf': [
{'$ref': '#/definitions/server_bridged'},
{'$ref': '#/definitions/server_routed'},
{'$ref': '#/definitions/server_manual'},
]
}
)
limited_schema['required'] = limited_schema.get('required', [])
limited_schema['required'].append('openvpn')
# default values for ca, cert and key
limited_schema['definitions']['tunnel']['properties']['ca']['default'] = 'ca.pem'
limited_schema['definitions']['tunnel']['properties']['cert']['default'] = 'cert.pem'
limited_schema['definitions']['tunnel']['properties']['key']['default'] = 'key.pem'
limited_schema['definitions']['server']['properties']['dh']['default'] = 'dh.pem'
limited_schema['properties']['files']['default'] = [
{'path': 'ca.pem', 'mode': '0644', 'contents': '{{ ca }}'},
{'path': 'cert.pem', 'mode': '0644', 'contents': '{{ cert }}'},
{'path': 'key.pem', 'mode': '0644', 'contents': '{{ key }}'},
{'path': 'dh.pem', 'mode': '0644', 'contents': '{{ dh }}'},
]
class OpenVpn(BaseOpenVpn):
"""
modified OpenVpn backend
its schema is adapted to be used as a VPN Server backend:
* shows server only
* allows only 1 vpn
* adds default values for ca, cert, key and dh
"""
schema = limited_schema
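# A small sanity sketch (assuming netjsonconfig is installed): the restricted
# schema accepts exactly one "openvpn" entry and only the server variants.
if __name__ == "__main__":
    openvpn = OpenVpn.schema['properties']['openvpn']
    print(openvpn['minItems'], openvpn['maxItems'])
    print([variant['$ref'] for variant in openvpn['items']['oneOf']])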
|
gpl-3.0
| 8,774,440,878,577,652,000
| 35.622222
| 85
| 0.613471
| false
| 3.695067
| false
| false
| false
|
doconix/django-mako-plus
|
django_mako_plus/tags.py
|
1
|
2889
|
from django.template import engines
from django.template import TemplateDoesNotExist
from mako.runtime import supports_caller
###
### Mako-style tags that DMP provides
###
###############################################################
### Include Django templates
###
def django_include(context, template_name, **kwargs):
'''
Mako tag to include a Django template withing the current DMP (Mako) template.
Since this is a Django template, it is search for using the Django search
algorithm (instead of the DMP app-based concept).
See https://docs.djangoproject.com/en/2.1/topics/templates/.
The current context is sent to the included template, which makes all context
variables available to the Django template. Any additional kwargs are added
to the context.
'''
try:
djengine = engines['django']
except KeyError as e:
raise TemplateDoesNotExist("Django template engine not configured in settings, so template cannot be found: {}".format(template_name)) from e
djtemplate = djengine.get_template(template_name)
djcontext = {}
djcontext.update(context)
djcontext.update(kwargs)
return djtemplate.render(djcontext, context['request'])
#########################################################
### Template autoescaping on/off
# attaching to `caller_stack` because it's the same object
# throughout rendering of a template inheritance
AUTOESCAPE_KEY = '__dmp_autoescape'
def is_autoescape(context):
return bool(getattr(context.caller_stack, AUTOESCAPE_KEY, True))
def _toggle_autoescape(context, escape_on=True):
'''
Internal method to toggle autoescaping on or off. This function
needs access to the caller, so the calling method must be
decorated with @supports_caller.
'''
previous = is_autoescape(context)
setattr(context.caller_stack, AUTOESCAPE_KEY, escape_on)
try:
context['caller'].body()
finally:
setattr(context.caller_stack, AUTOESCAPE_KEY, previous)
@supports_caller
def autoescape_on(context):
'''
Mako tag to enable autoescaping for a given block within a template,
(individual filters can still override with ${ somevar | n }).
Example:
<%namespace name="dmp" module="django_mako_plus.tags"/>
<%dmp:autoescape_on>
${ somevar } will be autoescaped.
</%dmp:autoescape_on>
'''
_toggle_autoescape(context, True)
return ''
@supports_caller
def autoescape_off(context):
'''
Mako tag to disable autoescaping for a given block within a template,
(individual filters can still override with ${ somevar | h }).
Example:
<%namespace name="dmp" module="django_mako_plus.tags"/>
<%dmp:autoescape>
${ somevar } will not be autoescaped.
</%dmp:autoescape>
'''
_toggle_autoescape(context, False)
return ''
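# A rough usage sketch for django_include (the template syntax below is an
# assumption, mirroring the namespace import shown in the autoescape docstrings
# above; 'myapp/partial.html' and extra_var are placeholders):
#
#   <%namespace name="dmp" module="django_mako_plus.tags"/>
#   ${ dmp.django_include('myapp/partial.html', extra_var='value') }
#
# Mako passes the current context automatically because the function's first
# parameter is named "context".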
|
apache-2.0
| -1,902,192,503,060,209,200
| 30.747253
| 149
| 0.66009
| false
| 4.261062
| false
| false
| false
|
opticode/eve
|
eve/__init__.py
|
1
|
1947
|
# -*- coding: utf-8 -*-
"""
Eve
~~~
An out-of-the-box REST Web API that's as dangerous as you want it to be.
:copyright: (c) 2014 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
.. versionchanged:: 0.5
'QUERY_WHERE' added.
'QUERY_SORT' added.
'QUERY_PAGE' added.
'QUERY_MAX_RESULTS' added.
'QUERY_PROJECTION' added.
'QUERY_EMBEDDED' added.
'RFC1123_DATE_FORMAT' added.
.. versionchanged:: 0.4
'META' defaults to '_meta'.
'ERROR' defaults to '_error'.
Remove unnecessary commented code.
.. versionchanged:: 0.2
'LINKS' defaults to '_links'.
'ITEMS' defaults to '_items'.
'STATUS' defaults to 'status'.
'ISSUES' defaults to 'issues'.
.. versionchanged:: 0.1.1
'SERVER_NAME' defaults to None.
.. versionchanged:: 0.0.9
    'DATE_FORMAT' now using GMT instead of UTC.
"""
__version__ = '0.5-dev'
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
RFC1123_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
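# Example (sketch): formatting a datetime with DATE_FORMAT yields an RFC 1123
# string, e.g.
#   >>> from datetime import datetime
#   >>> datetime(2014, 7, 1, 12, 0, 0).strftime(DATE_FORMAT)
#   'Tue, 01 Jul 2014 12:00:00 GMT'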
URL_PREFIX = ''
API_VERSION = ''
SERVER_NAME = None
PAGINATION = True
PAGINATION_LIMIT = 50
PAGINATION_DEFAULT = 25
ID_FIELD = '_id'
CACHE_CONTROL = 'max-age=10,must-revalidate' # TODO confirm this value
CACHE_EXPIRES = 10
RESOURCE_METHODS = ['GET']
ITEM_METHODS = ['GET']
ITEM_LOOKUP = True
ITEM_LOOKUP_FIELD = ID_FIELD
ITEM_URL = '[a-f0-9]{24}'
STATUS_OK = "OK"
STATUS_ERR = "ERR"
LAST_UPDATED = '_updated'
DATE_CREATED = '_created'
ISSUES = '_issues'
STATUS = '_status'
ERROR = '_error'
ITEMS = '_items'
LINKS = '_links'
ETAG = '_etag'
VERSION = '_version'
META = '_meta'
QUERY_WHERE = 'where'
QUERY_SORT = 'sort'
QUERY_PAGE = 'page'
QUERY_MAX_RESULTS = 'max_results'
QUERY_EMBEDDED = 'embedded'
QUERY_PROJECTION = 'projection'
VALIDATION_ERROR_STATUS = 422
# must be the last line (will raise W402 on pyflakes)
from eve.flaskapp import Eve # noqa
|
bsd-3-clause
| 683,748,090,322,254,000
| 21.905882
| 77
| 0.618901
| false
| 2.977064
| false
| false
| false
|
anushbmx/kitsune
|
kitsune/questions/config.py
|
1
|
16005
|
from collections import OrderedDict
from django.utils.translation import ugettext_lazy as _lazy
# The number of answers per page.
ANSWERS_PER_PAGE = 20
# The number of questions per page.
QUESTIONS_PER_PAGE = 20
# Highest ranking to show for a user
HIGHEST_RANKING = 100
# Special tag names:
ESCALATE_TAG_NAME = 'escalate'
NEEDS_INFO_TAG_NAME = 'needsinfo'
OFFTOPIC_TAG_NAME = 'offtopic'
# Escalation config
ESCALATE_EXCLUDE_PRODUCTS = ['thunderbird', 'webmaker', 'open-badges']
# How long until a question is automatically taken away from a user
TAKE_TIMEOUT = 600
# AAQ config:
products = OrderedDict([
('desktop', {
'name': _lazy(u'Firefox'),
'subtitle': _lazy(u'Web browser for Windows, Mac and Linux'),
'extra_fields': ['troubleshooting', 'ff_version', 'os', 'plugins'],
'tags': ['desktop'],
'product': 'firefox',
'categories': OrderedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('download-and-install', {
'name': _lazy(u'Download, install and migration'),
'topic': 'download-and-install',
'tags': ['download-and-install'],
}),
('privacy-and-security', {
'name': _lazy(u'Privacy and security settings'),
'topic': 'privacy-and-security',
'tags': ['privacy-and-security'],
}),
('customize', {
'name': _lazy(u'Customize controls, options and add-ons'),
'topic': 'customize',
'tags': ['customize'],
}),
('fix-problems', {
'name': _lazy(u'Fix slowness, crashing, error messages and '
u'other problems'),
'topic': 'fix-problems',
'tags': ['fix-problems'],
}),
('tips', {
'name': _lazy(u'Tips and tricks'),
'topic': 'tips',
'tags': ['tips'],
}),
('bookmarks', {
'name': _lazy(u'Bookmarks'),
'topic': 'bookmarks',
'tags': ['bookmarks'],
}),
('cookies', {
'name': _lazy(u'Cookies'),
'topic': 'cookies',
'tags': ['cookies'],
}),
('tabs', {
'name': _lazy(u'Tabs'),
'topic': 'tabs',
'tags': ['tabs'],
}),
('websites', {
'name': _lazy(u'Websites'),
'topic': 'websites',
'tags': ['websites'],
}),
('sync', {
'name': _lazy(u'Firefox Sync'),
'topic': 'sync',
'tags': ['sync'],
}),
('other', {
'name': _lazy(u'Other'),
'topic': 'other',
'tags': ['other'],
}),
])
}),
('mobile', {
'name': _lazy(u'Firefox for Android'),
'subtitle': _lazy(u'Web browser for Android smartphones and tablets'),
'extra_fields': ['ff_version', 'os', 'plugins'],
'tags': ['mobile'],
'product': 'mobile',
'categories': OrderedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('download-and-install', {
'name': _lazy(u'Download, install and migration'),
'topic': 'download-and-install',
'tags': ['download-and-install'],
}),
('privacy-and-security', {
'name': _lazy(u'Privacy and security settings'),
'topic': 'privacy-and-security',
'tags': ['privacy-and-security'],
}),
('customize', {
'name': _lazy(u'Customize controls, options and add-ons'),
'topic': 'customize',
'tags': ['customize'],
}),
('fix-problems', {
'name': _lazy(u'Fix slowness, crashing, error messages and '
u'other problems'),
'topic': 'fix-problems',
'tags': ['fix-problems'],
}),
('tips', {
'name': _lazy(u'Tips and tricks'),
'topic': 'tips',
'tags': ['tips'],
}),
('bookmarks', {
'name': _lazy(u'Bookmarks'),
'topic': 'bookmarks',
'tags': ['bookmarks'],
}),
('cookies', {
'name': _lazy(u'Cookies'),
'topic': 'cookies',
'tags': ['cookies'],
}),
('tabs', {
'name': _lazy(u'Tabs'),
'topic': 'tabs',
'tags': ['tabs'],
}),
('websites', {
'name': _lazy(u'Websites'),
'topic': 'websites',
'tags': ['websites'],
}),
('sync', {
'name': _lazy(u'Firefox Sync'),
'topic': 'sync',
'tags': ['sync'],
}),
('other', {
'name': _lazy(u'Other'),
'topic': 'other',
'tags': ['other'],
}),
])
}),
('ios', {
'name': _lazy(u'Firefox for iOS'),
'subtitle': _lazy(u'Firefox for iPhone, iPad and iPod touch devices'),
'extra_fields': ['ff_version', 'os', 'plugins'],
'tags': ['ios'],
'product': 'ios',
'categories': OrderedDict([
('install-and-update-firefox-ios', {
'name': _lazy(u'Install and Update'),
'topic': 'install-and-update-firefox-ios',
'tags': ['install-and-update-firefox-ios']
}),
('how-to-use-firefox-ios', {
'name': _lazy(u'How to use Firefox for iOS'),
'topic': 'how-to-use-firefox-ios',
'tags': ['how-to-use-firefox-ios']
}),
('firefox-ios-not-working-expected', {
'name': _lazy(u'Firefox for iOS is not working as expected'),
'topic': 'firefox-ios-not-working-expected',
'tags': ['firefox-ios-not-working-expected']
}),
])
}),
('focus', {
'name': _lazy(u'Firefox Focus'),
'subtitle': _lazy(u'Automatic privacy browser and content blocker'),
'extra_fields': [],
'tags': ['focus-firefox'],
'product': 'focus-firefox',
'categories': OrderedDict([
('Focus-ios', {
'name': _lazy(u'Firefox Focus for iOS'),
'topic': 'Focus-ios',
'tags': ['Focus-ios']
}),
('firefox-focus-android', {
'name': _lazy(u'Firefox Focus for Android'),
'topic': 'firefox-focus-android',
'tags': ['firefox-focus-android']
}),
])
}),
('firefox-amazon-devices', {
'name': _lazy(u'Firefox for Amazon Devices'),
'subtitle': _lazy(u'Browser for Amazon devices'),
'extra_fields': [],
'tags': ['firefox-amazon'],
'product': 'firefox-amazon-devices',
'categories': OrderedDict([
('firefox-fire-tv', {
'name': _lazy(u'Firefox for Fire TV'),
'topic': 'firefox-fire-tv',
'tags': ['firefox-fire-tv']
}),
('firefox-echo-show', {
'name': _lazy(u'Firefox for Echo Show'),
'topic': 'firefox-echo-show',
'tags': ['firefox-echo-show']
}),
])
}),
('thunderbird', {
'name': _lazy(u'Thunderbird'),
'subtitle': _lazy(u'Email software for Windows, Mac and Linux'),
'extra_fields': [],
'tags': [],
'product': 'thunderbird',
'categories': OrderedDict([
# TODO: Just use the IA topics for this.
# See bug 979397
('download-and-install', {
'name': _lazy(u'Download, install and migration'),
'topic': 'download-install-and-migration',
'tags': ['download-and-install'],
}),
('privacy-and-security', {
'name': _lazy(u'Privacy and security settings'),
'topic': 'privacy-and-security-settings',
'tags': ['privacy-and-security'],
}),
('customize', {
'name': _lazy(u'Customize controls, options and add-ons'),
'topic': 'customize-controls-options-and-add-ons',
'tags': ['customize'],
}),
('fix-problems', {
'name': _lazy(u'Fix slowness, crashing, error messages and '
u'other problems'),
'topic': 'fix-slowness-crashing-error-messages-and-other-'
'problems',
'tags': ['fix-problems'],
}),
('calendar', {
'name': _lazy(u'Calendar'),
'topic': 'calendar',
'tags': ['calendar'],
}),
('other', {
'name': _lazy(u'Other'),
'topic': 'other',
'tags': ['other'],
}),
])
}),
('firefox-lite', {
'name': _lazy(u'Firefox Lite'),
'subtitle': _lazy(u'Mobile browser for Indonesia'),
'extra_fields': [],
'tags': ['firefox-lite'],
'product': 'firefox-lite',
'categories': OrderedDict([
('get-started', {
'name': _lazy(u'Get started'),
'topic': 'get-started',
'tags': ['get-started']
}),
('fix-problems', {
'name': _lazy(u'Fix problems'),
'topic': 'fix-problems',
'tags': ['fix-problems']
}),
])
}),
('firefox-enterprise', {
'name': _lazy(u'Firefox for Enterprise'),
'subtitle': _lazy(u'Enterprise version of Firefox'),
'extra_fields': [],
'tags': [],
'product': 'firefox-enterprise',
'categories': OrderedDict([
('deploy-firefox-for-enterprise', {
'name': _lazy(u'Deploy'),
'topic': 'deploy-firefox-for-enterprise',
'tags': ['deployment'],
}),
('policies-customization-enterprise', {
'name': _lazy(u'Manage updates, policies & customization'),
'topic': 'policies-customization-enterprise',
'tags': ['customization'],
}),
('manage-add-ons-enterprise', {
'name': _lazy(u'Manage add-ons'),
'topic': 'manage-add-ons-enterprise',
'tags': ['customization'],
}),
('manage-certificates-firefox-enterprise', {
'name': _lazy(u'Manage certificates'),
'topic': 'manage-certificates-firefox-enterprise',
'tags': ['customization'],
}),
])
}),
('firefox-reality', {
'name': _lazy(u'Firefox Reality'),
'subtitle': _lazy(u'Firefox for Virtual Reality'),
'extra_fields': [],
'tags': [],
'product': 'firefox-reality',
'categories': OrderedDict([
('get-started', {
'name': _lazy(u'Get started with Firefox Reality'),
'topic': 'get-started',
'tags': ['get-started'],
}),
('troubleshooting-reality', {
'name': _lazy(u'Troubleshooting Firefox Reality'),
'topic': 'troubleshooting-reality',
'tags': ['troubleshooting'],
}),
])
}),
('firefox-preview', {
'name': _lazy(u'Firefox Preview'),
'subtitle': _lazy(u'Firefox for Android'),
'extra_fields': [],
'tags': [],
'product': 'firefox-preview',
'categories': OrderedDict([
('install-and-update-firefox-preview', {
'name': _lazy(u'Install and Update'),
'topic': 'install-and-update',
'tags': ['download-and-install'],
}),
('how-to-use-firefox-preview', {
'name': _lazy(u'How do I use Firefox Preview'),
'topic': 'how-do-i-use-firefox-preview',
'tags': ['tips'],
}),
('browsing-firefox-preview', {
'name': _lazy(u'Browsing'),
'topic': 'browsing-preview',
'tags': ['tips'],
}),
('library-firefox-preview', {
'name': _lazy(u'Library'),
'topic': 'library',
'tags': ['library'],
}),
('sync-firefox-preview', {
'name': _lazy(u'Sync'),
'topic': 'sync-preview',
'tags': ['sync'],
}),
('privacy-and-security-firefox-preview', {
'name': _lazy(u'Privacy and Security'),
'topic': 'privacy-and-security',
'tags': ['privacy-and-security'],
}),
('fix-problems-with-firefox-preview', {
'name': _lazy(u'Fix problems with Firefox Preview'),
'topic': 'fix-problems-firefox-preview',
'tags': ['fix-problems'],
}),
('settings-and-preferences-firefox-preview', {
'name': _lazy(u'Settings and Preferences'),
'topic': 'settings-prefs-preview',
'tags': ['customize'],
}),
('advanced-settings-firefox-preview', {
'name': _lazy(u'Advanced Settings'),
'topic': 'advanced-settings-preview',
'tags': ['customize'],
}),
])
}),
('firefox-lockwise', {
'name': _lazy(u'Firefox Lockwise'),
'subtitle': _lazy(u'Firefox Lockwise'),
'extra_fields': [],
'tags': [],
'product': 'firefox-lockwise',
'categories': OrderedDict([
('install-and-set-up', {
'name': _lazy(u'Install and set up'),
'topic': 'install-lockwise',
'tags': ['install-and-set-up'],
}),
('manage-settings-and-logins', {
'name': _lazy(u'Manage settings and logins'),
'topic': 'lockwise-settings',
'tags': ['settings-and-logins'],
}),
('fix-problems-with-firefox-lockwise', {
'name': _lazy(u'Fix problems with Firefox Lockwise'),
'topic': 'fix-problems-lockwise',
'tags': ['fix-problems'],
}),
])
}),
('other', {
'name': _lazy(u'Other Mozilla products'),
'subtitle': '',
'product': '',
'html': _lazy(u'This site only provides support for some of our products. '
u'For other support, please find your product below.'
u'<ul class="product-support">'
u'<li><a href="http://www.seamonkey-project.org/doc/">'
u'SeaMonkey support</a></li>'
u'<li><a '
u'href="/questions/new/thunderbird">'
u'Lightning support</a></li>'
u'</ul>'),
'categories': OrderedDict([]),
'deadend': True,
}),
])
def add_backtrack_keys(products):
"""Insert 'key' keys so we can go from product or category back to key."""
for p_k, p_v in products.iteritems():
p_v['key'] = p_k
for c_k, c_v in p_v['categories'].iteritems():
c_v['key'] = c_k
add_backtrack_keys(products)
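# Illustrative effect of add_backtrack_keys(): every product and category dict
# now carries its own key, so code holding only the dict can navigate back, e.g.
#   products['desktop']['key'] == 'desktop'
#   products['desktop']['categories']['bookmarks']['key'] == 'bookmarks'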
|
bsd-3-clause
| 1,854,020,737,929,702,700
| 34.64588
| 83
| 0.436114
| false
| 4.278268
| false
| false
| false
|
chaen/DIRAC
|
ResourceStatusSystem/Service/ResourceManagementHandler.py
|
1
|
6218
|
''' ResourceManagementHandler
Module that allows users to access the ResourceManagementDB remotely.
'''
from DIRAC import gConfig, S_OK, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ResourceStatusSystem.Utilities import Synchronizer
from DIRAC.ResourceStatusSystem.DB.ResourceManagementDB import ResourceManagementDB
__RCSID__ = '$Id$'
def initializeResourceManagementHandler(_serviceInfo):
'''
Handler initialization, where we set the ResourceManagementDB as global db.
'''
global db
db = ResourceManagementDB()
syncObject = Synchronizer.Synchronizer()
gConfig.addListenerToNewVersionEvent(syncObject.sync)
return S_OK()
################################################################################
class ResourceManagementHandler(RequestHandler):
'''
  The ResourceManagementHandler exposes the DB front-end functions through an
  XML-RPC server, functionalities inherited from
  :class:`DIRAC.Core.DISET.RequestHandler.RequestHandler`
According to the ResourceManagementDB philosophy, only functions of the type:
- insert
- select
- delete
- addOrModify
are exposed. If you need anything more complicated, either look for it on the
:class:`ResourceManagementClient`, or code it yourself. This way the DB and the
Service are kept clean and tidied.
  You can use this service directly, but you MUST NOT DO IT. Use it through the
  :class:`ResourceManagementClient`. It offers, in the worst case, performance
  as good as the :class:`ResourceManagementHandler`, if not better.
  >>> from DIRAC.Core.DISET.RPCClient import RPCClient
  >>> server = RPCClient("ResourceStatus/ResourceManagement")
'''
def __init__(self, *args, **kwargs):
super(ResourceManagementHandler, self).__init__(*args, **kwargs)
@staticmethod
def __logResult(methodName, result):
'''
Method that writes to log error messages
'''
if not result['OK']:
gLogger.error('%s : %s' % (methodName, result['Message']))
@staticmethod
def setDatabase(database):
'''
    This method lets us inherit from this class and overwrite the database object
without having problems with the global variables.
:Parameters:
**database** - `MySQL`
database used by this handler
:return: None
'''
global db
db = database
types_insert = [basestring, dict]
def export_insert(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely. It
    adds neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
gLogger.info('insert: %s %s' % (table, params))
# remove unnecessary key generated by locals()
del params['self']
res = db.insert(table, params)
self.__logResult('insert', res)
return res
types_select = [[basestring, dict], dict]
def export_select(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely.
    It adds neither processing nor validation. If you need to know more about
    this method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
gLogger.info('select: %s %s' % (table, params))
res = db.select(table, params)
self.__logResult('select', res)
return res
types_delete = [[basestring, dict], dict]
def export_delete(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely.
    It adds neither processing nor validation. If you need to know more about
    this method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
gLogger.info('delete: %s %s' % (table, params))
res = db.delete(table, params)
self.__logResult('delete', res)
return res
types_addOrModify = [[basestring, dict], dict]
def export_addOrModify(self, table, params):
'''
    This method is a bridge to access :class:`ResourceManagementDB` remotely. It
    adds neither processing nor validation. If you need to know more about this
    method, keep reading the database documentation.
:Parameters:
**table** - `string` or `dict`
should contain the table from which querying
if it's a `dict` the query comes from a client prior to v6r18
**params** - `dict`
arguments for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
gLogger.info('addOrModify: %s %s' % (table, params))
res = db.addOrModify(table, params)
self.__logResult('addOrModify', res)
return res
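  # Illustrative client-side call shape (sketch only; the table and column names
  # below are placeholders, not part of the real schema):
  #   server = RPCClient("ResourceStatus/ResourceManagement")
  #   res = server.select('SomeTable', {'columns': ['Name']})
  #   if res['OK']:
  #     rows = res['Value']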
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
gpl-3.0
| 3,929,100,881,457,097,700
| 30.72449
| 92
| 0.660663
| false
| 4.115156
| false
| false
| false
|
fcopantoja/djangomx
|
djangomx/blog/models.py
|
1
|
3689
|
# coding: utf-8
import os
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext as _
from core.utils import get_filename
class Category(models.Model):
""" Category Model """
title = models.CharField(
verbose_name=_(u'Título'),
help_text=_(u' '),
max_length=255
)
slug = models.SlugField(
verbose_name=_(u'Slug'),
help_text=_(u'Identificador Uri'),
max_length=255,
unique=True
)
description = models.CharField(
verbose_name=_(u'Descripción'),
help_text=_(u' '),
max_length=255,
blank=True
)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _(u'Categoría')
verbose_name_plural = _(u'Categorías')
def __unicode__(self):
return "%s" % (self.title,)
def get_img_path(instance, filename):
name, ext = os.path.splitext(filename)
return 'blog/%s' % get_filename(ext)
class Post(models.Model):
""" Post Model """
title = models.CharField(
verbose_name=_(u'Título'),
help_text=_(u' '),
max_length=255
)
description = models.TextField(
blank=True, null=True, help_text=u'Descripción usada para SEO'
)
slug = models.SlugField(
verbose_name=_(u'Slug'),
help_text=_(u'Identificador Uri'),
max_length=255,
unique=True
)
image = models.ImageField(
verbose_name=_(u'Imágen'),
help_text=_(u'Imagen destacada'),
blank=True,
upload_to=get_img_path
)
content = models.TextField(help_text=_(u'Este es el contenido de el Post'),)
extract = models.TextField(
blank=True,
help_text=_(u'Este es solo un resumen de el Post que se muestra en la \
lista de posts'),
)
category = models.ForeignKey(
Category,
verbose_name=_(u'Categoría'),
null=True,
blank=True
)
author = models.ForeignKey(User, verbose_name=_(u'Autor'))
published_at = models.DateTimeField(
verbose_name=_(u'Fecha de publicación')
)
likes = models.PositiveIntegerField(verbose_name=_(u'Likes'), default=0)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _(u'Posts')
verbose_name_plural = _(u'Posts')
ordering = ["-created_at"]
def __unicode__(self):
return "%s" % (self.title,)
def get_admin_url(self):
content_type = ContentType.objects.get_for_model(self.__class__)
return urlresolvers.reverse(
"admin:%s_%s_change" % (
content_type.app_label, content_type.model
),
args=(self.id,)
)
def get_absolute_url(self):
from django.core.urlresolvers import reverse
return reverse('blog:view_post', args=[str(self.slug)])
@property
def full_url(self):
current_site = Site.objects.get_current()
return '{}{}'.format(current_site.domain, self.get_absolute_url())
@property
def img_full_url(self):
if self.image:
current_site = Site.objects.get_current()
return '{}{}'.format(current_site.domain, self.image.url)
else:
return ''
|
mit
| 760,292,171,597,744,800
| 28.44
| 80
| 0.602717
| false
| 3.698492
| false
| false
| false
|
marvin-ai/marvin-python-toolbox
|
marvin_python_toolbox/engine_base/engine_base_prediction.py
|
1
|
1656
|
#!/usr/bin/env python
# coding=utf-8
# Copyright [2017] [B2W Digital]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from .._compatibility import six
from .._logging import get_logger
from .engine_base_action import EngineBaseOnlineAction
__all__ = ['EngineBasePrediction']
logger = get_logger('engine_base_prediction')
class EngineBasePrediction(EngineBaseOnlineAction):
__metaclass__ = ABCMeta
_model = None
_metrics = None
def __init__(self, **kwargs):
self._model = self._get_arg(kwargs=kwargs, arg='model')
self._metrics = self._get_arg(kwargs=kwargs, arg='metrics')
super(EngineBasePrediction, self).__init__(**kwargs)
@property
def marvin_model(self):
return self._load_obj(object_reference='_model')
@marvin_model.setter
def marvin_model(self, model):
self._save_obj(object_reference='_model', obj=model)
@property
def marvin_metrics(self):
return self._load_obj(object_reference='_metrics')
@marvin_metrics.setter
def marvin_metrics(self, metrics):
self._save_obj(object_reference='_metrics', obj=metrics)
|
apache-2.0
| 1,667,712,753,690,461,200
| 28.571429
| 74
| 0.701087
| false
| 3.833333
| false
| false
| false
|
weiweihuanghuang/wei-glyphs-scripts
|
Spacing/Show Kerning Pairs Exception.py
|
1
|
3326
|
#MenuTitle: Show Kerning Pairs Exception
# -*- coding: utf-8 -*-
__doc__="""
Show Kerning Exception Pairs for this glyph in a new tab.
"""
import GlyphsApp
thisFont = Glyphs.font
Doc = Glyphs.currentDocument
selectedLayers = thisFont.selectedLayers
namesOfSelectedGlyphs = [ l.parent.name for l in selectedLayers if hasattr(l.parent, 'name')]
namesOfSelectedGlyphs = [i for i in namesOfSelectedGlyphs if i != "/space"]
selectedMaster = thisFont.selectedFontMaster
masterID = selectedMaster.id
# Look for:
# New Tab for every glyph
# to make it every glyph new tab
def nameMaker(kernGlyphOrGroup, side):
# if this is a kerning group
if kernGlyphOrGroup[0] == "@":
for g in thisFont.glyphs:
# right glyph
if side == "right":
# left side of right glyph
if g.leftKerningGroup == kernGlyphOrGroup[7:]:
return g.name
if side == "left":
# right side of left glyph
if g.rightKerningGroup == kernGlyphOrGroup[7:]:
return g.name
else:
return thisFont.glyphForId_(kernGlyphOrGroup).name
# One Tab for all
editString = u""""""
for thisGlyphName in namesOfSelectedGlyphs:
# New Tab for every glyph
# editString = u""""""
thisGlyph = thisFont.glyphs[thisGlyphName]
rGroupName = str(thisGlyph.rightKerningGroup)
lGroupName = str(thisGlyph.leftKerningGroup)
for L in thisFont.kerning[ masterID ]:
try:
# If L matches thisGlyph or its right side group
# @L R
# if L[0] == "@" and rGroupName == L[7:] or rGroupName == thisFont.glyphForId_(L).name:
# # for every R counterpart to L in the kerning pairs of rGroupName
# for R in thisFont.kerning[masterID][L]:
# # R is not group kerning
# if thisFont.kerning[masterID][L][R] != 0 and R[0] != "@":
# print "L: @L R\t\t", L, R
# print "\t", "%s, %s" % (thisGlyphName, nameMaker(R, "right"))
# kernPair = "/%s/%s " % (thisGlyphName, nameMaker(R, "right"))
# editString += kernPair
# L @R, L R
if thisFont.glyphForId_(L).name == thisGlyph.name:
# for every R counterpart to L in the kerning pairs of rGroupName
for R in thisFont.kerning[masterID][L]:
if thisFont.kerning[masterID][L][R] < 8e+10:
# print "L: L @R, L R\t", L, R
# print "\t", "%s, %s" % (thisGlyphName, nameMaker(R, "right"))
kernPair = "/%s/%s " % (thisGlyphName, nameMaker(R, "right"))
editString += kernPair
except:
pass
for R in thisFont.kerning[masterID][L]:
try:
# If R matches thisGlyph or its left side group
# L @R
# if R[0] == "@" and lGroupName == R[7:] or lGroupName == thisFont.glyphForId_(R).name:
# if thisFont.kerning[masterID][L][R] != 0 and L[0] != "@":
# print "R: L @R\t\t", L, R
# print "\t", "%s, %s" % (nameMaker(L, "left"), thisGlyphName)
# kernPair = "/%s/%s " % (nameMaker(L, "left"), thisGlyphName)
# editString += kernPair
# @L R, L R
if thisFont.glyphForId_(R).name == thisGlyph.name:
if thisFont.kerning[masterID][L][R] < 8e+10:
# print "R: @L R, L R\t", L, R
# print "\t", "%s, %s" % (nameMaker(L, "left"), thisGlyphName)
kernPair = "/%s/%s " % (nameMaker(L, "left"), thisGlyphName)
editString += kernPair
except:
pass
# New Tab for every glyph
# thisFont.newTab(editString)
# One Tab for all
# editString += "\n"
thisFont.newTab(editString)
|
apache-2.0
| -6,634,094,482,841,212,000
| 31.617647
| 93
| 0.634396
| false
| 2.746491
| false
| false
| false
|
ddico/odoo
|
addons/hr_recruitment/models/hr_job.py
|
1
|
6632
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
from odoo import api, fields, models, _
class Job(models.Model):
_name = "hr.job"
_inherit = ["mail.alias.mixin", "hr.job"]
_order = "state desc, name asc"
@api.model
def _default_address_id(self):
return self.env.company.partner_id
def _get_default_favorite_user_ids(self):
return [(6, 0, [self.env.uid])]
address_id = fields.Many2one(
'res.partner', "Job Location", default=_default_address_id,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]",
help="Address where employees are working")
application_ids = fields.One2many('hr.applicant', 'job_id', "Applications")
application_count = fields.Integer(compute='_compute_application_count', string="Application Count")
new_application_count = fields.Integer(
compute='_compute_new_application_count', string="New Application",
help="Number of applications that are new in the flow (typically at first step of the flow)")
manager_id = fields.Many2one(
'hr.employee', related='department_id.manager_id', string="Department Manager",
readonly=True, store=True)
user_id = fields.Many2one('res.users', "Responsible", tracking=True)
hr_responsible_id = fields.Many2one(
'res.users', "HR Responsible", tracking=True,
help="Person responsible of validating the employee's contracts.")
document_ids = fields.One2many('ir.attachment', compute='_compute_document_ids', string="Documents")
documents_count = fields.Integer(compute='_compute_document_ids', string="Document Count")
alias_id = fields.Many2one(
'mail.alias', "Alias", ondelete="restrict", required=True,
help="Email alias for this job position. New emails will automatically create new applicants for this job position.")
color = fields.Integer("Color Index")
is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite')
favorite_user_ids = fields.Many2many('res.users', 'job_favorite_user_rel', 'job_id', 'user_id', default=_get_default_favorite_user_ids)
def _compute_is_favorite(self):
for job in self:
job.is_favorite = self.env.user in job.favorite_user_ids
def _inverse_is_favorite(self):
unfavorited_jobs = favorited_jobs = self.env['hr.job']
for job in self:
if self.env.user in job.favorite_user_ids:
unfavorited_jobs |= job
else:
favorited_jobs |= job
favorited_jobs.write({'favorite_user_ids': [(4, self.env.uid)]})
unfavorited_jobs.write({'favorite_user_ids': [(3, self.env.uid)]})
def _compute_document_ids(self):
applicants = self.mapped('application_ids').filtered(lambda self: not self.emp_id)
app_to_job = dict((applicant.id, applicant.job_id.id) for applicant in applicants)
attachments = self.env['ir.attachment'].search([
'|',
'&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids),
'&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicants.ids)])
result = dict.fromkeys(self.ids, self.env['ir.attachment'])
for attachment in attachments:
if attachment.res_model == 'hr.applicant':
result[app_to_job[attachment.res_id]] |= attachment
else:
result[attachment.res_id] |= attachment
for job in self:
job.document_ids = result[job.id]
job.documents_count = len(job.document_ids)
def _compute_application_count(self):
read_group_result = self.env['hr.applicant'].read_group([('job_id', 'in', self.ids)], ['job_id'], ['job_id'])
result = dict((data['job_id'][0], data['job_id_count']) for data in read_group_result)
for job in self:
job.application_count = result.get(job.id, 0)
def _get_first_stage(self):
self.ensure_one()
return self.env['hr.recruitment.stage'].search([
'|',
('job_ids', '=', False),
('job_ids', '=', self.id)], order='sequence asc', limit=1)
def _compute_new_application_count(self):
for job in self:
job.new_application_count = self.env["hr.applicant"].search_count(
[("job_id", "=", job.id), ("stage_id", "=", job._get_first_stage().id)]
)
def _alias_get_creation_values(self):
values = super(Job, self)._alias_get_creation_values()
values['alias_model_id'] = self.env['ir.model']._get('hr.applicant').id
if self.id:
values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or "{}")
defaults.update({
'job_id': self.id,
'department_id': self.department_id.id,
'company_id': self.department_id.company_id.id if self.department_id else self.company_id.id,
})
return values
@api.model
def create(self, vals):
vals['favorite_user_ids'] = vals.get('favorite_user_ids', []) + [(4, self.env.uid)]
new_job = super(Job, self).create(vals)
utm_linkedin = self.env.ref("utm.utm_source_linkedin", raise_if_not_found=False)
if utm_linkedin:
source_vals = {
'source_id': utm_linkedin.id,
'job_id': new_job.id,
}
self.env['hr.recruitment.source'].create(source_vals)
return new_job
def _creation_subtype(self):
return self.env.ref('hr_recruitment.mt_job_new')
def action_get_attachment_tree_view(self):
action = self.env.ref('base.action_attachment').read()[0]
action['context'] = {
'default_res_model': self._name,
'default_res_id': self.ids[0]
}
action['search_view_id'] = (self.env.ref('hr_recruitment.ir_attachment_view_search_inherit_hr_recruitment').id, )
action['domain'] = ['|', '&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', self.mapped('application_ids').ids)]
return action
def close_dialog(self):
return {'type': 'ir.actions.act_window_close'}
def edit_dialog(self):
form_view = self.env.ref('hr.view_hr_job_form')
return {
'name': _('Job'),
'res_model': 'hr.job',
'res_id': self.id,
'views': [(form_view.id, 'form'),],
'type': 'ir.actions.act_window',
'target': 'inline'
}
|
agpl-3.0
| -2,389,517,044,412,755,000
| 44.424658
| 190
| 0.592129
| false
| 3.561762
| false
| false
| false
|
emijrp/wmcharts
|
wmchart0004.py
|
1
|
3064
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2014 emijrp <emijrp@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from wmchart0000 import *
def main():
filename = 'wmchart0004.php'
title = 'Deletions and restorations'
    description = "This chart shows how many deletions and restorations were made in the last few days."
projectdbs = getProjectDatabases()
queries = [
["Deletions", "SELECT CONCAT(YEAR(log_timestamp),'-',LPAD(MONTH(log_timestamp),2,'0'),'-',LPAD(DAY(log_timestamp),2,'0'),'T00:00:00Z') AS date, COUNT(*) AS count FROM logging WHERE log_timestamp>=DATE_ADD(NOW(), INTERVAL -%d DAY) AND log_action='delete' GROUP BY date ORDER BY date ASC" % (lastdays)],
["Article deletions", "SELECT CONCAT(YEAR(log_timestamp),'-',LPAD(MONTH(log_timestamp),2,'0'),'-',LPAD(DAY(log_timestamp),2,'0'),'T00:00:00Z') AS date, COUNT(*) AS count FROM logging WHERE log_namespace=0 AND log_timestamp>=DATE_ADD(NOW(), INTERVAL -%d DAY) AND log_action='delete' GROUP BY date ORDER BY date ASC" % (lastdays)],
["Restorations", "SELECT CONCAT(YEAR(log_timestamp),'-',LPAD(MONTH(log_timestamp),2,'0'),'-',LPAD(DAY(log_timestamp),2,'0'),'T00:00:00Z') AS date, COUNT(*) AS count FROM logging WHERE log_timestamp>=DATE_ADD(NOW(), INTERVAL -%d DAY) AND log_action='restore' GROUP BY date ORDER BY date ASC" % (lastdays)],
]
projects = runQueries(projectdbs=projectdbs, queries=queries)
select = generateHTMLSelect(projects)
var1 = []
var2 = []
var3 = []
for project, values in projects:
var1.append(values["Deletions"])
var2.append(values["Article deletions"])
var3.append(values["Restorations"])
js = """function p() {
var d1 = %s;
var d2 = %s;
var d3 = %s;
var placeholder = $("#placeholder");
var selected = document.getElementById('projects').selectedIndex;
var data = [{ data: d1[selected], label: "Deletions"}, { data: d2[selected], label: "Article deletions"}, { data: d3[selected], label: "Restorations"}];
var options = { xaxis: { mode: "time" }, lines: {show: true}, points: {show: true}, legend: {noColumns: 3}, grid: { hoverable: true }, };
$.plot(placeholder, data, options);
}
p();""" % (str(var1), str(var2), str(var3))
output = generateHTML(title=title, description=description, select=select, js=js)
writeHTML(filename=filename, output=output)
if __name__ == '__main__':
main()
|
gpl-3.0
| 4,346,915,588,700,290,000
| 51.827586
| 337
| 0.666123
| false
| 3.462147
| false
| false
| false
|
talkoopaiva/talkoohakemisto-api
|
tests/views/test_types.py
|
1
|
3151
|
import operator
from flask import url_for
import pytest
from talkoohakemisto import serializers
from talkoohakemisto.extensions import db
from tests import factories
@pytest.mark.usefixtures('request_ctx', 'database')
class TestTypeIndex(object):
@pytest.fixture
def types(self):
types = [
factories.VoluntaryWorkTypeFactory(),
factories.VoluntaryWorkTypeFactory(),
]
db.session.commit()
return types
@pytest.fixture
def response(self, client, types):
return client.get(url_for('type.index'))
def test_url(self):
assert url_for('type.index') == '/types'
def test_returns_200(self, response):
assert response.status_code == 200
def test_response_has_proper_content_type(self, response):
assert response.mimetype == 'application/vnd.api+json'
def test_returns_types_as_json(self, response, types):
serializer = serializers.VoluntaryWorkTypeSerializer(
sorted(types, key=operator.attrgetter('name')),
many=True
)
assert response.json == {
'types': serializer.data
}
@pytest.mark.usefixtures('request_ctx', 'database')
class TestTypeGetSingle(object):
@pytest.fixture
def type(self):
type = factories.VoluntaryWorkTypeFactory()
db.session.commit()
return type
@pytest.fixture
def response(self, client, type):
return client.get(url_for('type.get', id=type.id))
def test_url(self):
assert url_for('type.get', id=123) == '/types/123'
def test_returns_200(self, response):
assert response.status_code == 200
def test_response_has_proper_content_type(self, response):
assert response.mimetype == 'application/vnd.api+json'
def test_returns_type_as_json(self, response, type):
serializer = serializers.VoluntaryWorkTypeSerializer(
[type],
many=True
)
assert response.json == {
'types': serializer.data
}
@pytest.mark.usefixtures('request_ctx', 'database')
class TestTypeGetSingleWhenNotFound(object):
@pytest.fixture
def response(self, client):
return client.get(url_for('type.get', id=12345))
def test_returns_404(self, response):
assert response.status_code == 404
def test_response_has_proper_content_type(self, response):
assert response.mimetype == 'application/vnd.api+json'
def test_returns_error_as_json(self, response):
assert response.json == {
'message': 'Not found'
}
@pytest.mark.usefixtures('request_ctx', 'database')
class TestTypeGetSingleWithNonIntegerID(object):
@pytest.fixture
def response(self, client):
return client.get('/types/foobar')
def test_returns_404(self, response):
assert response.status_code == 404
def test_response_has_proper_content_type(self, response):
assert response.mimetype == 'application/vnd.api+json'
def test_returns_error_as_json(self, response):
assert response.json == {
'message': 'Not found'
}
|
mit
| -4,321,390,514,477,213,000
| 27.908257
| 62
| 0.645509
| false
| 3.928928
| true
| false
| false
|
jmescuderojustel/codeyourblogin-python-django-1.7
|
src/blog/tools.py
|
1
|
1304
|
from django.conf import settings
import math
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, redirect
class Pager:
def __init__(self, page, count):
if page is None or int(page) < 1:
page = 1
else:
            page = int(page)
self.currentPage = page
self.downLimit = (page - 1) * settings.PAGE_SIZE
self.upLimit = page * settings.PAGE_SIZE
self.pages = [page-2, page-1, page, page+1, page+2]
self.finalPage = int(math.ceil(float(count) / float(settings.PAGE_SIZE)))
def buildPager(page, count):
return Pager(page, count)
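# Illustrative arithmetic (assumes settings.PAGE_SIZE == 10): Pager(3, 45) gives
# downLimit=20, upLimit=30, pages=[1, 2, 3, 4, 5] and finalPage=5, so a view
# would slice its queryset as queryset[20:30].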
def render_with_user(request, url, template, data, requires_user=True):
data['currentUrl'] = url
current_user = request.session['currentUser']
if current_user is not None:
data['current_user'] = current_user['name']
return render(request, template, data)
elif requires_user is False:
data['current_user'] = ''
return render(request, template, data)
else:
return redirect('/user/login')
def render_with_user_opt(request, url, template, data):
return render_with_user(request, url, template, data, False)
def is_user(request):
return (request.session['currentUser'] is not None)
|
mit
| 731,154,896,484,565,500
| 22.727273
| 81
| 0.644939
| false
| 3.7151
| false
| false
| false
|
maxalbert/colormap-selector
|
color_transformations_skimage.py
|
1
|
1823
|
import numpy as np
import matplotlib.colors as mcolors
from skimage.color import rgb2lab as rgb2lab_skimage
from skimage.color import lab2rgb as lab2rgb_skimage
class RGBRangeError(Exception):
pass
def rgb2lab(rgb):
rgb = np.asarray(rgb).reshape(1, 1, 3)
lab = rgb2lab_skimage(rgb).reshape(3)
return lab
def lab2rgb(lab, assert_valid=False, clip=False):
lab = np.asarray(lab).reshape(1, 1, 3)
rgb = lab2rgb_skimage(lab).reshape(3)
if assert_valid and ((rgb < 0.0).any() or (rgb > 1.0).any()):
raise RGBRangeError()
if clip:
rgb = np.clip(rgb, 0., 1.)
return rgb
def lab2rgba(lab, assert_valid=False, clip=False):
r, g, b = lab2rgb(lab, assert_valid=assert_valid, clip=clip)
return np.array([r, g, b, 1.])
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
    or `Lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
#print np.where(cmap_vals < 0)
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
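# Illustrative usage (endpoint colors are arbitrary placeholders):
#   cmap = linear_colormap([0.0, 0.0, 0.5], [1.0, 0.9, 0.2], coordspace='RGB')
#   # cmap can then be handed to matplotlib, e.g. plt.imshow(data, cmap=cmap)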
|
mit
| 1,315,041,971,995,979,500
| 30.982456
| 71
| 0.635217
| false
| 3.053601
| false
| false
| false
|
improve-project/platform
|
models/RehabilitationSetClass.py
|
1
|
1448
|
__author__ = 'tommipor'
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Text, DateTime
Base = declarative_base()
class RehabilitationSetClass(Base):
__tablename__ = 'RehabilitationSet'
rehabilitationSetID = Column(String(255), primary_key=True)
allowedOrganizations = Column(Text)
exerciseResultIDs = Column(Text)
patientConditionIDs = Column(Text)
patientInformationID = Column(String(255))
def __init__(self, rehabilitationSetID, allowedOrganizations, exerciseResultIDs, patientConditionIDs, patientInformationID):
self.rehabilitationSetID = rehabilitationSetID
self.exerciseResultIDs = exerciseResultIDs
self.patientConditionIDs = patientConditionIDs
        self.patientInformationID = patientInformationID
        self.allowedOrganizations = allowedOrganizations
def __repr__(self):
return "<RehabilitationSet(%s, %s, %s, %s, %s)>" % (self.rehabilitationSetID, self.allowedOrganizations, self.exerciseResultIDs, self.patientConditionIDs, self.patientInformationID)
@property
def columns(self):
return [ c.name for c in self.__table__.columns ]
@property
def columnitems(self):
return dict([ (c, getattr(self, c)) for c in self.columns ])
def tojson(self):
return self.columnitems
|
bsd-3-clause
| -780,814,773,737,549,400
| 41.617647
| 197
| 0.679558
| false
| 4.01108
| false
| false
| false
|
SonyCSL/CSLAIER
|
src/common/nvidia_devices_info.py
|
1
|
11234
|
#!/usr/bin/env python2
import ctypes
import platform
from logging import getLogger
logger = getLogger(__name__)
class c_cudaDeviceProp(ctypes.Structure):
"""
Passed to cudart.cudaGetDeviceProperties()
"""
_fields_ = [
('name', ctypes.c_char * 256),
('totalGlobalMem', ctypes.c_size_t),
('sharedMemPerBlock', ctypes.c_size_t),
('regsPerBlock', ctypes.c_int),
('warpSize', ctypes.c_int),
('memPitch', ctypes.c_size_t),
('maxThreadsPerBlock', ctypes.c_int),
('maxThreadsDim', ctypes.c_int * 3),
('maxGridSize', ctypes.c_int * 3),
('clockRate', ctypes.c_int),
('totalConstMem', ctypes.c_size_t),
('major', ctypes.c_int),
('minor', ctypes.c_int),
('textureAlignment', ctypes.c_size_t),
('texturePitchAlignment', ctypes.c_size_t),
('deviceOverlap', ctypes.c_int),
('multiProcessorCount', ctypes.c_int),
('kernelExecTimeoutEnabled', ctypes.c_int),
('integrated', ctypes.c_int),
('canMapHostMemory', ctypes.c_int),
('computeMode', ctypes.c_int),
('maxTexture1D', ctypes.c_int),
('maxTexture1DMipmap', ctypes.c_int),
('maxTexture1DLinear', ctypes.c_int),
('maxTexture2D', ctypes.c_int * 2),
('maxTexture2DMipmap', ctypes.c_int * 2),
('maxTexture2DLinear', ctypes.c_int * 3),
('maxTexture2DGather', ctypes.c_int * 2),
('maxTexture3D', ctypes.c_int * 3),
('maxTexture3DAlt', ctypes.c_int * 3),
('maxTextureCubemap', ctypes.c_int),
('maxTexture1DLayered', ctypes.c_int * 2),
('maxTexture2DLayered', ctypes.c_int * 3),
('maxTextureCubemapLayered', ctypes.c_int * 2),
('maxSurface1D', ctypes.c_int),
('maxSurface2D', ctypes.c_int * 2),
('maxSurface3D', ctypes.c_int * 3),
('maxSurface1DLayered', ctypes.c_int * 2),
('maxSurface2DLayered', ctypes.c_int * 3),
('maxSurfaceCubemap', ctypes.c_int),
('maxSurfaceCubemapLayered', ctypes.c_int * 2),
('surfaceAlignment', ctypes.c_size_t),
('concurrentKernels', ctypes.c_int),
('ECCEnabled', ctypes.c_int),
('pciBusID', ctypes.c_int),
('pciDeviceID', ctypes.c_int),
('pciDomainID', ctypes.c_int),
('tccDriver', ctypes.c_int),
('asyncEngineCount', ctypes.c_int),
('unifiedAddressing', ctypes.c_int),
('memoryClockRate', ctypes.c_int),
('memoryBusWidth', ctypes.c_int),
('l2CacheSize', ctypes.c_int),
('maxThreadsPerMultiProcessor', ctypes.c_int),
('streamPrioritiesSupported', ctypes.c_int),
('globalL1CacheSupported', ctypes.c_int),
('localL1CacheSupported', ctypes.c_int),
('sharedMemPerMultiprocessor', ctypes.c_size_t),
('regsPerMultiprocessor', ctypes.c_int),
('managedMemSupported', ctypes.c_int),
('isMultiGpuBoard', ctypes.c_int),
('multiGpuBoardGroupID', ctypes.c_int),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_int * 128),
# added later with cudart.cudaDeviceGetPCIBusId
# (needed by NVML)
('pciBusID_str', ctypes.c_char * 16),
]
class struct_c_nvmlDevice_t(ctypes.Structure):
"""
Handle to a device in NVML
"""
pass # opaque handle
c_nvmlDevice_t = ctypes.POINTER(struct_c_nvmlDevice_t)
class c_nvmlMemory_t(ctypes.Structure):
"""
Passed to nvml.nvmlDeviceGetMemoryInfo()
"""
_fields_ = [
('total', ctypes.c_ulonglong),
('free', ctypes.c_ulonglong),
('used', ctypes.c_ulonglong),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_ulonglong * 8),
]
class c_nvmlUtilization_t(ctypes.Structure):
"""
Passed to nvml.nvmlDeviceGetUtilizationRates()
"""
_fields_ = [
('gpu', ctypes.c_uint),
('memory', ctypes.c_uint),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_uint * 8),
]
def get_library(name):
"""
Returns a ctypes.CDLL or None
"""
try:
if platform.system() == 'Windows':
return ctypes.windll.LoadLibrary(name)
else:
return ctypes.cdll.LoadLibrary(name)
except OSError:
pass
return None
def get_cudart():
"""
Return the ctypes.DLL object for cudart or None
"""
if platform.system() == 'Windows':
arch = platform.architecture()[0]
for ver in range(90, 50, -5):
cudart = get_library('cudart%s_%d.dll' % (arch[:2], ver))
if cudart is not None:
return cudart
elif platform.system() == 'Darwin':
for major in xrange(9, 5, -1):
for minor in (5, 0):
cudart = get_library('libcudart.%d.%d.dylib' % (major, minor))
if cudart is not None:
return cudart
return get_library('libcudart.dylib')
else:
for major in xrange(9, 5, -1):
for minor in (5, 0):
cudart = get_library('libcudart.so.%d.%d' % (major, minor))
if cudart is not None:
return cudart
return get_library('libcudart.so')
return None
def get_nvml():
"""
Return the ctypes.DLL object for cudart or None
"""
if platform.system() == 'Windows':
return get_library('nvml.dll')
else:
for name in (
'libnvidia-ml.so.1',
'libnvidia-ml.so',
'nvml.so'):
nvml = get_library(name)
if nvml is not None:
return nvml
return None
devices = None
def get_devices(force_reload=False):
"""
Returns a list of c_cudaDeviceProp's
Prints an error and returns None if something goes wrong
Keyword arguments:
force_reload -- if False, return the previously loaded list of devices
"""
global devices
if not force_reload and devices is not None:
# Only query CUDA once
return devices
devices = []
cudart = get_cudart()
if cudart is None:
return []
# check CUDA version
cuda_version = ctypes.c_int()
rc = cudart.cudaRuntimeGetVersion(ctypes.byref(cuda_version))
if rc != 0:
logger.error('cudaRuntimeGetVersion() failed with error #%s' % rc)
return []
if cuda_version.value < 6050:
        logger.error('ERROR: Cuda version must be >= 6.5, not "%s"' % cuda_version.value)
return []
# get number of devices
num_devices = ctypes.c_int()
rc = cudart.cudaGetDeviceCount(ctypes.byref(num_devices))
if rc != 0:
logger.error('cudaGetDeviceCount() failed with error #%s' % rc)
return []
# query devices
for x in xrange(num_devices.value):
properties = c_cudaDeviceProp()
rc = cudart.cudaGetDeviceProperties(ctypes.byref(properties), x)
if rc == 0:
pciBusID_str = ' ' * 16
# also save the string representation of the PCI bus ID
rc = cudart.cudaDeviceGetPCIBusId(ctypes.c_char_p(pciBusID_str), 16, x)
if rc == 0:
properties.pciBusID_str = pciBusID_str
devices.append(properties)
else:
logger.error('cudaGetDeviceProperties() failed with error #%s' % rc)
del properties
return devices
def get_device(device_id):
"""
Returns a c_cudaDeviceProp
"""
return get_devices()[int(device_id)]
def get_nvml_info(device_id):
"""
Gets info from NVML for the given device
Returns a dict of dicts from different NVML functions
"""
device = get_device(device_id)
if device is None:
return None
nvml = get_nvml()
if nvml is None:
return None
rc = nvml.nvmlInit()
if rc != 0:
raise RuntimeError('nvmlInit() failed with error #%s' % rc)
try:
# get device handle
handle = c_nvmlDevice_t()
rc = nvml.nvmlDeviceGetHandleByPciBusId(ctypes.c_char_p(device.pciBusID_str), ctypes.byref(handle))
if rc != 0:
raise RuntimeError('nvmlDeviceGetHandleByPciBusId() failed with error #%s' % rc)
# Grab info for this device from NVML
info = {
'minor_number': device_id,
'product_name': device.name
}
uuid = ' ' * 41
rc = nvml.nvmlDeviceGetUUID(handle, ctypes.c_char_p(uuid), 41)
if rc == 0:
info['uuid'] = uuid[:-1]
temperature = ctypes.c_int()
rc = nvml.nvmlDeviceGetTemperature(handle, 0, ctypes.byref(temperature))
if rc == 0:
info['temperature'] = temperature.value
speed = ctypes.c_uint()
rc = nvml.nvmlDeviceGetFanSpeed(handle, ctypes.byref(speed))
if rc == 0:
info['fan'] = speed.value
power_draw = ctypes.c_uint()
rc = nvml.nvmlDeviceGetPowerUsage(handle, ctypes.byref(power_draw))
if rc == 0:
info['power_draw'] = power_draw.value
power_limit = ctypes.c_uint()
rc = nvml.nvmlDeviceGetPowerManagementLimit(handle, ctypes.byref(power_limit))
if rc == 0:
info['power_limit'] = power_limit.value
memory = c_nvmlMemory_t()
rc = nvml.nvmlDeviceGetMemoryInfo(handle, ctypes.byref(memory))
if rc == 0:
info['memory_total'] = memory.total
info['memory_used'] = memory.used
utilization = c_nvmlUtilization_t()
rc = nvml.nvmlDeviceGetUtilizationRates(handle, ctypes.byref(utilization))
if rc == 0:
info['gpu_util'] = utilization.gpu
return info
finally:
rc = nvml.nvmlShutdown()
if rc != 0:
pass
def add_unit(data):
temperature = 'temperature'
if temperature in data:
data[temperature] = '{} C'.format(data[temperature])
fan = 'fan'
if fan in data:
data[fan] = '{} %'.format(data[fan])
power_draw = 'power_draw'
if power_draw in data:
data[power_draw] = '{:.2f} W'.format(float(data[power_draw]) / pow(10, 3))
power_limit = 'power_limit'
if power_limit in data:
data[power_limit] = '{:.2f} W'.format(float(data[power_limit]) / pow(10, 3))
memory_total = 'memory_total'
if memory_total in data:
data[memory_total] = '{} MiB'.format(data[memory_total] / pow(2, 20))
memory_used = 'memory_used'
if memory_used in data:
data[memory_used] = '{} MiB'.format(data[memory_used] / pow(2, 20))
gpu_util = 'gpu_util'
if gpu_util in data:
data[gpu_util] = '{} %'.format(data[gpu_util])
def get_devices_info():
if not len(get_devices()):
return None
nvml = get_nvml()
nvml.nvmlInit()
version = ' ' * 80
nvml.nvmlSystemGetDriverVersion(ctypes.c_char_p(version), 80)
version = version.strip()[:-1]
gpus = []
for i, device in enumerate(get_devices()):
info = get_nvml_info(i)
if info:
gpus.append(info)
for gpu in gpus:
add_unit(gpu)
return {
'gpus': gpus,
'driver_version': version
}
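# Illustrative usage (sketch; the reported keys depend on what CUDA/NVML expose
# on the host, and None is returned when no device is found):
#   if __name__ == '__main__':
#       import pprint
#       pprint.pprint(get_devices_info())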
|
mit
| -8,568,659,771,402,383,000
| 30.205556
| 107
| 0.57504
| false
| 3.498599
| false
| false
| false
|
ekholabs/ekholabs-es
|
service/ElasticsearchService.py
|
1
|
1093
|
from ElasticsearchConnection import Resource
from uuid import uuid4
class ElasticsearchIndex:
@staticmethod
def create(index_name, settings):
es = Resource().connect()
index = es.indices.create(index=index_name, ignore=400, body=settings)
return index
@staticmethod
def delete_index(index_name):
es = Resource().connect()
index = es.indices.delete(index=index_name, ignore=[400, 404])
return index
@staticmethod
def index(index_name, document_type, payload):
es = Resource().connect()
index = es.index(index=index_name, doc_type=document_type, id=uuid4(), body=payload)
return index
@staticmethod
def query(index_name, query_criteria):
es = Resource().connect()
index = es.search(index=index_name, body=query_criteria)
return index
@staticmethod
def delete_document(index_name, document_type, document_id):
es = Resource().connect()
index = es.delete(index=index_name, doc_type=document_type, id=document_id)
return index
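# Illustrative usage (index/type names and payloads are placeholders):
#   ElasticsearchIndex.create('articles', {'settings': {'number_of_shards': 1}})
#   ElasticsearchIndex.index('articles', 'article', {'title': 'Hello world'})
#   hits = ElasticsearchIndex.query('articles', {'query': {'match_all': {}}})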
|
mit
| -8,110,233,009,886,834,000
| 27.025641
| 92
| 0.651418
| false
| 4.063197
| false
| false
| false
|
lexelby/apiary
|
historical/mysql_watcher/dblibs/dbutil.py
|
1
|
47774
|
#!/usr/bin/env python
#
# $LicenseInfo:firstyear=2007&license=mit$
#
# Copyright (c) 2007-2010, Linden Research, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# $/LicenseInfo$
#
#
# Utility classes that allow us to monitor and keep track of databases
#
import array
import binascii
import gzip
import math
import os
import re
import socket
import string
import struct
import sys
import time
from llbase import llsd
def asciify(str):
"Lame ASCIIfication of a string to keep various things from barfing"
out_str = ""
for ch in str:
if (ch >= chr(0x9)) and (ch <= '~'):
out_str += ch
else:
out_str += "."
return out_str
def all_as_maps(cursor):
"""Return all of the cursor with maps for each row instead of sequences"""
all_seq = cursor.fetchall()
ret_all = []
descs = cursor.description
for row in all_seq:
new_row = {}
count = 0
for desc in descs:
new_row[desc[0]] = row[count]
count += 1
ret_all.append(new_row)
return ret_all
#
# Cache IP to string lookup to make it faster
#
ip_table = {}
def lookup_ip_string(ip_bin):
if not ip_bin in ip_table:
ip_table[ip_bin] = "%d.%d.%d.%d" % ((ip_bin & 0xff000000L) >> 24,
(ip_bin & 0x00ff0000L) >> 16,
(ip_bin & 0x0000ff00L) >> 8,
ip_bin & 0x000000ffL)
return ip_table[ip_bin]
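# Example: lookup_ip_string(0x7f000001) returns "127.0.0.1"; results are cached
# in ip_table so repeated lookups skip the bit shifting.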
def llquery_from_llsd(query_llsd):
    # Hack, fill in arbitrary data for info that isn't serialized
query = LLQuery(None, None, query_llsd['query'], 0.0)
query.mData['host_clean'] = query_llsd['host_clean']
query.mData['query_clean'] = query_llsd['query_clean']
# Hack, keeps correctOutliers from trashing the data
#query.mNumQueries = query_llsd['num_queries']
#query.mTotalTime = query_llsd['total_time']
try:
query.mNumQueriesCorrected = query_llsd['num_queries_corrected']
query.mTotalTimeCorrected = query_llsd['total_time_corrected']
except:
# Hack for old output which didn't generate this data
query.mNumQueriesCorrected = query_llsd['num_queries']
query.mTotalTimeCorrected = query_llsd['total_time']
return query
def get_query_tables(query):
"Return the list of tables in a query"
#
# Really dumb method, literally iterates through a bunch of regular expressions to pull this out.
# There are probably better methods out there.
#
out_tables = []
# Clean up the query
query = query.replace('\n',' ')
query = re.sub('\s+', ' ', query)
m = LLQuery.sSelectWhereRE.match(query)
if m:
# Split apart by commas
tables = m.group(1).split(',')
for table in tables:
# Take the first part (which is table name)
out_tables.append(string.strip(table.split()[0]))
return out_tables
m = LLQuery.sSelectRE.match(query)
if m:
out_tables.append(string.strip(m.group(1)))
return out_tables
m = LLQuery.sUpdateRE.match(query)
if m:
# Split apart by commas
tables = m.group(1).split(',')
for table in tables:
# Take the first part (which is table name)
out_tables.append(string.strip(table.split()[0]))
return out_tables
m = LLQuery.sReplaceRE.match(query)
if m:
out_tables.append(string.strip(m.group(1)))
return out_tables
m = LLQuery.sInsertRE.match(query)
if m:
out_tables.append(string.strip(m.group(1)))
return out_tables
m = LLQuery.sDeleteRE.match(query)
if m:
out_tables.append(string.strip(m.group(1)))
return out_tables
return out_tables
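# Illustrative behaviour: get_query_tables("SELECT * FROM users WHERE id = 1")
# returns ['users']; multi-table FROM clauses are split on commas and only the
# first token of each entry (the table name) is kept.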
MIN_BIN=-15
MAX_BIN=10
class LLQuery:
"Represents all of the data associated with a query"
fromLLSDStats = staticmethod(llquery_from_llsd)
def __init__(self, host, port, query, start_time):
# Store information which will be serialized for metadata in a map
self.mData = {}
self.mData['host'] = host
self.mData['port'] = port
self.mData['query'] = query
# Metadata
self.mData['host_clean'] = None
self.mData['host_full'] = None
self.mData['query_clean'] = None
self.mData['tables'] = []
#
# Stats information
#
self.mNumQueries = 0
self.mTotalTime = 0.0
self.mOutQueries = 0
self.mTotalTimeCorrected = 0.0 # Corrected to remove outliers
self.mNumQueriesCorrected = 0 # Corrected to remove outliers
# LLQueryStatBins for the query time histogram, as well as corrected time
# Query times are collected into bins based on power of 2 execution times (in seconds).
# Each bin collects the number of queries and total execution time. See LLQueryStatBin
# for more details
self.mBins = {} # Bins for histogram
# This stuff doesn't usually get serialized
self.mQueryLen = len(query)
self.mStartTime = start_time
self.mResponseTime = start_time
def __hash__(self):
return (self.mData['host_clean'] + ":" + self.mData['query_clean']).__hash__()
def __eq__(self, other):
# Note, this matches on clean, not strictly correct
if ((self.mData['query_clean'] == other.mData['query_clean']) and
(self.mData['host_clean'] == other.mData['host_clean'])):
return True
return False
def getKey(self):
# The string key is just the clean host and query, concatenated
return self.mData['host_clean'] + ":" + self.mData['query_clean']
def clean(self):
"Generate the clean query so it can be used for statistics"
if not self.mData['host_clean']:
(self.mData['host_clean'], self.mData['host_full']) = get_host_type(self.mData['host'])
self.mData['query_clean'] = clean_query(self.mData['query'], 0)
def getAvgTimeCorrected(self):
"Average time per query, corrected for outliers"
return self.mTotalTimeCorrected/self.mNumQueriesCorrected
def queryStart(self):
"When collecting query stats, use this when the query is receieved"
self.mNumQueries += 1
self.mOutQueries += 1
def queryResponse(self, elapsed):
"When collecting stats, use this when the response is received"
self.mTotalTime += elapsed
self.mOutQueries -=1
# Determine which stat bin this query is in
bin = MIN_BIN
if elapsed:
bin = int(math.log(elapsed,2))
bin = max(MIN_BIN, bin)
bin = min(MAX_BIN, bin)
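            # e.g. a hypothetical elapsed = 0.05s gives int(log2(0.05)) == -4
            # (int() truncates toward zero, so negative exponents round up),
            # i.e. the bin whose nominal range is [2^-4, 2^-3) seconds.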
if bin not in self.mBins:
self.mBins[bin] = LLQueryStatBin(bin)
self.mBins[bin].accumulate(elapsed)
def correctOutliers(self):
"Find outliers bins and calculate corrected results"
        # Outlier bins have query counts below 1% (two orders of magnitude less) of the total count for that query
if not self.mNumQueries:
# FIXME: This is a hack because we don't save this information in the query count dump
return
min_queries = self.mNumQueries/100
self.mTotalTimeCorrected = 0.0
self.mNumQueriesCorrected = 0
for i in self.mBins.keys():
if self.mBins[i].mNumQueries < min_queries:
# Outlier, flag as such.
self.mBins[i].mOutlier = True
else:
self.mTotalTimeCorrected += self.mBins[i].mTotalTime
self.mNumQueriesCorrected += self.mBins[i].mNumQueries
if self.mNumQueriesCorrected == 0:
#HACK: Deal with divide by zero
self.mNumQueriesCorrected = 1
# Miscellaneous regular expressions to analyze the query type
sReadRE = re.compile("(SELECT.*)|(USE.*)", re.IGNORECASE)
sSelectWhereRE = re.compile("\(?\s*?SELECT.+?FROM\s+\(?(.*?)\)?\s+WHERE.*", re.IGNORECASE)
sSelectRE = re.compile("\(?\s*?SELECT.+?FROM\s+(.+)(?:\s+LIMIT.*|.*)", re.IGNORECASE)
sUpdateRE = re.compile("UPDATE\s+(.+?)\s+SET.*", re.IGNORECASE)
sReplaceRE = re.compile("REPLACE INTO\s+(.+?)(?:\s*\(|\s+SET).*", re.IGNORECASE)
sInsertRE = re.compile("INSERT.+?INTO\s+(.+?)(?:\s*\(|\s+SET).*", re.IGNORECASE)
sDeleteRE = re.compile("DELETE.+?FROM\s+(.+?)\s+WHERE.*", re.IGNORECASE)
def analyze(self):
"Does some query analysis on the query"
query = self.mData['query_clean']
self.mData['tables'] = get_query_tables(query)
if 'type' in self.mData:
# Already analyzed
return
if LLQuery.sReadRE.match(query):
self.mData['type'] = 'read'
else:
self.mData['type'] = 'write'
def dumpLine(self, elapsed, query_len = 0):
"Dump a semi-human-readable stats line for reporting"
bin_str = ''
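        # One character per bin: '.' = empty, '*' = flagged outlier,
        # otherwise the digit int(log10(count)) for that bin.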
for i in range(MIN_BIN,MAX_BIN+1):
if i in self.mBins:
if self.mBins[i].mOutlier:
bin_str += '*'
else:
bin_str += str(int(math.log10(self.mBins[i].mNumQueries)))
else:
bin_str += '.'
if not query_len:
query_len = 4096
num_queries = self.mNumQueriesCorrected
if not num_queries:
num_queries = 1
return ("%s\t%5d\t%6.2f\t%6.2f\t%1.4f\t%s\t" % (bin_str, num_queries,
num_queries/elapsed, self.mTotalTimeCorrected,
self.mTotalTimeCorrected/num_queries, self.mData['host_clean'])) \
+ self.mData['query_clean'][0:query_len]
def as_map(self):
"Make an LLSD map version of data that can be used for merging"
self.analyze()
self.mData['num_queries'] = self.mNumQueries
self.mData['total_time'] = self.mTotalTime
self.mData['num_queries_corrected'] = self.mNumQueriesCorrected
self.mData['total_time_corrected'] = self.mTotalTimeCorrected
return self.mData
class LLConnStatus:
"Keeps track of the status of a connection talking to mysql"
def __init__(self, ip_port, start_time):
self.mLastMysqlPacketNumber = 0
self.mNumPackets = 0
self.mIPPort = ip_port
self.mStartTime = start_time
self.mLastUpdate = start_time
self.mCurState = ""
self.mLastQuery = None
self.mNumQueries = 0
def quit(self, src_ip, src_port, pkt_time):
query = LLQuery(src_ip, src_port, "Quit", pkt_time)
query.clean()
self.mLastUpdate = pkt_time
self.mLastQuery = query
self.mNumPackets += 1
def queryStart(self, src_ip, src_port, pkt_time, raw, pkt_len, offset):
query_len = pkt_len - 1
query = LLQuery(src_ip, src_port, raw[offset:offset + (pkt_len - 1)], pkt_time)
self.mLastUpdate = pkt_time
# Packet length includes the command, offset into raw doesn't
if query_len > (len(raw) - offset):
query.mQueryLen = query_len
self.mCurState = "SendingQuery"
else:
self.mCurState = "QuerySent"
query.clean()
self.mNumQueries += 1
self.mLastQuery = query
self.mNumPackets += 1
def queryStartProcessed(self, src_ip, src_port, pkt_time, query_str):
query = LLQuery(src_ip, src_port, query_str, pkt_time)
query.clean()
self.mLastUpdate = pkt_time
self.mCurState = "QuerySent"
self.mNumQueries += 1
self.mLastQuery = query
self.mNumPackets += 1
def updateNonCommand(self, pkt_time, raw):
# Clean up an existing query if you get a non-command.
self.mNumPackets += 1
self.mLastUpdate = pkt_time
if self.mLastQuery:
if self.mCurState == "SendingQuery":
# We're continuing a query
# We won't generate a new clean version, because it'll $!@# up all the sorting.
self.mLastQuery.mData['query'] += raw
if len(self.mLastQuery.mData['query']) == self.mLastQuery.mQueryLen:
self.mCurState = "QuerySent"
self.mLastQuery.clean()
return
else:
#
# A non-command that's continuing a query. Not sure why this is happening,
# but clear the last query to avoid generating inadvertent long query results.
#
self.mLastQuery = None
# Default to setting state to "NonCommand"
self.mCurState = "NonCommand"
def updateResponse(self, pkt_time, result_type):
# If we've got a query running, accumulate the elapsed time
start_query_response = False
if self.mCurState == "QuerySent":
lq = self.mLastQuery
if lq:
if lq.mStartTime == 0.0:
lq.mStartTime = pkt_time
lq.mResponseTime = pkt_time
start_query_response = True
self.mLastUpdate = pkt_time
if result_type == 0:
self.mCurState = "Result:RecvOK"
elif result_type == 0xff:
self.mCurState = "Result:Error"
elif result_type == 0xfe:
self.mCurState = "Result:EOF"
elif result_type == 0x01:
self.mCurState = "Result:Header"
else:
self.mCurState = "Result:Data"
return start_query_response
def dump(self):
if self.mLastQuery:
print "%s: NumQ: %d State:%s\n\tLast: %s" % (self.mIPPort, self.mNumQueries, self.mCurState,
self.mLastQuery.mData['query_clean'][0:40])
else:
print "%s: NumQ: %d State:%s\n\tLast: None" % (self.mIPPort, self.mNumQueries, self.mCurState)
class LLQueryStatBin:
"Keeps track of statistics for one query bin"
def __init__(self, power):
self.mMinTime = pow(2, power)
self.mMaxTime = pow(2, power+1)
self.mTotalTime = 0
self.mNumQueries = 0
self.mOutlier = False
def accumulate(self, elapsed):
self.mTotalTime += elapsed
self.mNumQueries += 1
def dump_query_stat_header():
return "LogHistogram (-15:10) \tCount\tQPS\tTotal\tAvg\tHost\tQuery"
class LLQueryStatMap:
def __init__(self, description, start_time):
self.mDescription = description
self.mQueryMap = {}
self.mStartTime = start_time
self.mFinalTime = 0
self.mLastTime = self.mStartTime
self.mQueryStartCount = 0
self.mQueryResponseCount = 0
def load(self, fn):
"Load dumped query stats from an LLSD file"
# Read in metadata
in_file = open(fn)
in_string = in_file.read()
in_file.close()
in_llsd = llsd.LLSD.parse(in_string)
info = in_llsd[0]
query_list = in_llsd[1]
self.mDescription = info['description']
self.mStartTime = info['start_time']
self.mLastTime = info['last_time']
self.mFinalTime = info['last_time']
self.mQueryStartCount = info['query_start_count']
self.mQueryResponseCount = info['query_response_count']
# Iterate through all the queries, and populate the query map.
for query_row in query_list:
query = LLQuery.fromLLSDStats(query_row)
self.mQueryMap[query.getKey()] = query
def analyze(self):
for query in self.mQueryMap.values():
query.analyze()
def queryStart(self, query):
if not query in self.mQueryMap:
#query.analyze()
self.mQueryMap[query] = query
self.mQueryMap[query].queryStart()
# Update elapsed time for this map
self.mLastTime = query.mStartTime
if self.mLastTime < self.mStartTime:
self.mStartTime = self.mLastTime
if self.mLastTime > self.mFinalTime:
self.mFinalTime = self.mLastTime
self.mQueryStartCount += 1
def queryResponse(self, query):
if not query in self.mQueryMap:
self.queryStart(query)
elapsed = query.mResponseTime - query.mStartTime
self.mQueryMap[query].queryResponse(elapsed)
self.mLastTime = query.mResponseTime
if self.mLastTime > self.mFinalTime:
self.mFinalTime = self.mLastTime
self.mQueryResponseCount += 1
def getElapsedTime(self):
return self.mFinalTime - self.mStartTime
def getQPS(self):
return self.mQueryStartCount / self.getElapsedTime()
def correctOutliers(self):
for query in self.mQueryMap.values():
query.correctOutliers()
def getSortedKeys(self, sort_by = "total_time"):
"Gets a list of keys sorted by sort type"
self.correctOutliers()
items = self.mQueryMap.items()
backitems = None
if sort_by == "total_time":
backitems = [[v[1].mTotalTimeCorrected, v[0]] for v in items]
elif sort_by == "count":
backitems = [[v[1].mNumQueriesCorrected, v[0]] for v in items]
elif sort_by == "avg_time":
backitems = [[v[1].getAvgTimeCorrected(), v[0]] for v in items]
else:
# Fallback, sort by total time
backitems = [[v[1].mTotalTimeCorrected, v[0]] for v in items]
backitems.sort()
backitems.reverse()
# Get the keys out of the items
sorted = []
for pair in backitems:
sorted.append(pair[1])
return sorted
def getSortedStats(self, sort_by = "total_time", num_stats = 0):
"Gets a list of the top queries according to sort type"
sorted_keys = self.getSortedKeys(sort_by)
if num_stats == 0:
l = len(sorted_keys)
else:
l = min(num_stats, len(sorted_keys))
stats = []
for i in range(0, l):
stats.append(self.mQueryMap[sorted_keys[i]])
return stats
def dumpStatus(self, sort_type = "total_time", elapsed = None):
# Dump status according to total time
if not elapsed:
elapsed = self.getElapsedTime()
sorted_stats = self.getSortedStats(sort_type)
for query in sorted_stats:
print query.dumpLine(elapsed, 60)
def dumpLLSD(self, filename):
# Analyze queries to generate metadata
self.analyze()
# Dump an LLSD document representing the entire object
out = []
# First, dump all the metadata into the first block
info_map = {}
info_map['description'] = self.mDescription
info_map['start_time'] = self.mStartTime
info_map['last_time'] = self.mLastTime
info_map['query_start_count'] = self.mQueryStartCount
info_map['query_response_count'] = self.mQueryResponseCount
out.append(info_map)
# Dump all of the query info into the second block
sorted_stats = self.getSortedStats("total_time")
query_list = []
for query in sorted_stats:
query_list.append(query.as_map())
out.append(query_list)
f = open(filename, "w")
f.write(str(llsd.LLSD(out)))
f.close()
def dumpTiming(self, filename):
cur_time = time.time()
f = open(filename, "w")
f.write(dump_query_stat_header() + "\n")
# Sort the queries
sorted_stats = self.getSortedStats("total_time")
for query in sorted_stats:
f.write(query.dumpLine(cur_time - self.mStartTime))
f.write("\n")
f.close()
def dumpCountsLLSD(self, filename):
"Dump the query statistics as an LLSD doc, for later merging with the query_info doc"
out = []
# Put the metadata into a map
info_map = {}
info_map['description'] = self.mDescription
info_map['start_time'] = self.mStartTime
info_map['last_time'] = self.mLastTime
info_map['query_start_count'] = self.mQueryStartCount
info_map['query_response_count'] = self.mQueryResponseCount
out.append(info_map)
sorted_stats = self.getSortedStats("total_time")
query_list = []
for query in sorted_stats:
query_row = {}
# We only want to dump identifying info and stats, not metadata
query_row['host_clean'] = query.mData['host_clean']
# Convert the queries to utf-8 to make sure it doesn't break XML
try:
u = unicode(query.mData['query_clean'])
query_row['query_clean'] = u.encode('utf-8')
except:
query_row['query_clean'] = 'NON-UTF8'
try:
u = unicode(query.mData['query'])
query_row['query'] = u.encode('utf-8')
except:
query_row['query'] = 'NON-UTF8'
query_row['count'] = query.mNumQueriesCorrected
query_row['total_time'] = query.mTotalTimeCorrected
query_row['avg_time'] = query.getAvgTimeCorrected()
query_list.append(query_row)
out.append(query_list)
f = open(filename, "w")
f.write(str(llsd.LLSD(out)))
f.close()
class LLBinnedQueryStats:
"Keeps track of a fixed number of N minute bins of query stats"
def __init__(self):
self.mHourBins = {} # This will be keyed by unixtime seconds, eventually
self.mMinuteBins = {}
self.mLastUpdateHour = 0
self.mLastUpdateMinute = 0
def dumpTiming(self, path):
# Dump hour bins
for (key, value) in self.mHourBins.items():
value.dumpTiming("%s/hour-%s-query_timing.txt" % (path, key))
# Dump minute bins
for (key, value) in self.mMinuteBins.items():
value.dumpTiming("%s/minute-%s-query_timing.txt" % (path, key))
def dumpCountsLLSD(self, path):
# Dump hour bins
for (key, value) in self.mHourBins.items():
value.dumpCountsLLSD("%s/hour-%s-query_counts.llsd" % (path, key))
# Dump minute bins
for (key, value) in self.mMinuteBins.items():
value.dumpCountsLLSD("%s/minute-%s-query_counts.llsd" % (path, key))
def dumpLLSD(self, path):
# Dump hour bins
for (key, value) in self.mHourBins.items():
value.dumpLLSD("%s/hour-%s-query_dump.llsd" % (path, key))
# Dump minute bins
for (key, value) in self.mMinuteBins.items():
value.dumpLLSD("%s/minute-%s-query_dump.llsd" % (path, key))
def flushOldBins(self, time_secs):
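        # Only minute bins are flushed; any bin more than three hours older
        # than time_secs is dropped (hour bins are kept indefinitely).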
for minute_bin_str in self.mMinuteBins.keys():
bin_secs = time.mktime(time.strptime(minute_bin_str, "%Y-%m-%d-%H-%M"))
if (time_secs - bin_secs) > 3*3600:
del self.mMinuteBins[minute_bin_str]
def queryStart(self, query):
"Update associated bin for the time specified, creating if necessary"
# Hour and minute bins
t = time.localtime(query.mStartTime)
hour_bin_str = time.strftime("%Y-%m-%d-%H", t)
minute_bin_str = time.strftime("%Y-%m-%d-%H-%M", t)
hour = t[3]
minute = t[4]
# FIXME: These start times are a bit inaccurate, but should be fine under heavy query load.
if not hour_bin_str in self.mHourBins:
self.mHourBins[hour_bin_str] = LLQueryStatMap(hour_bin_str, query.mStartTime)
if not minute_bin_str in self.mMinuteBins:
self.mMinuteBins[minute_bin_str] = LLQueryStatMap(minute_bin_str, query.mStartTime)
self.mHourBins[hour_bin_str].queryStart(query)
self.mMinuteBins[minute_bin_str].queryStart(query)
if hour != self.mLastUpdateHour:
self.mLastUpdateHour = hour
# If the hour changes, dump and clean out old bins
self.flushOldBins(query.mStartTime)
def queryResponse(self, query):
"Update associated bin for the time specified, creating if necessary"
# Hour and minute bins
t = time.localtime(query.mStartTime)
hour_bin_str = time.strftime("%Y-%m-%d-%H", t)
minute_bin_str = time.strftime("%Y-%m-%d-%H-%M", t)
hour = t[3]
minute = t[4]
# FIXME: These start times are a bit inaccurate, but should be fine under heavy query load.
if not hour_bin_str in self.mHourBins:
self.mHourBins[hour_bin_str] = LLQueryStatMap(hour_bin_str, query.mStartTime)
if not minute_bin_str in self.mMinuteBins:
            self.mMinuteBins[minute_bin_str] = LLQueryStatMap(minute_bin_str, query.mStartTime)
self.mHourBins[hour_bin_str].queryResponse(query)
self.mMinuteBins[minute_bin_str].queryResponse(query)
# MySQL protocol sniffer, using tcpdump, ncap packet parsing and mysql internals
# http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol
class LLQueryStream:
"Process a raw tcpdump stream (in raw libpcap format)"
def __init__(self, in_file):
self.mInFile = in_file
self.mStartTime = time.time()
#
# A list of all outstanding "connections", and what they're doing.
# This is necessary in order to get script timing and other information.
#
self.mConnStatus = {}
self.mConnKeys = []
self.mConnCleanupIndex = 0
#
# Parse/skip past the libpcap global header
#
#guint32 magic_number; /* magic number */
#guint16 version_major; /* major version number */
#guint16 version_minor; /* minor version number */
#gint32 thiszone; /* GMT to local correction */
#guint32 sigfigs; /* accuracy of timestamps */
#guint32 snaplen; /* max length of captured packets, in octets */
#guint32 network; /* data link type */
# Skip past the libpcap global header
format = 'IHHiIII'
size = struct.calcsize(format)
header_bin = self.mInFile.read(size)
res = struct.unpack(format, header_bin)
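        # res[0] is the libpcap magic number (0xa1b2c3d4 when the capture was
        # written in this host's byte order); it is not validated here, so a
        # byte-swapped capture would be misparsed.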
def createConnection(self, client_ip_port, pkt_time):
# Track the connection, create a new one or return existing
if not client_ip_port in self.mConnStatus:
self.mConnStatus[client_ip_port] = LLConnStatus(client_ip_port, pkt_time)
# Track a new key that we need to garbage collect
self.mConnKeys.append(client_ip_port)
conn = self.mConnStatus[client_ip_port]
return conn
def closeConnection(self, ip_port):
if ip_port in self.mConnStatus:
del self.mConnStatus[ip_port]
def cleanupConnection(self,cur_time):
# Cleanup some number of stale connections.
CONNECTION_EXPIRY=900.0
if self.mConnCleanupIndex >= len(self.mConnKeys):
self.mConnCleanupIndex = 0
# Skip if no keys
if len(self.mConnKeys) == 0:
return
key = self.mConnKeys[self.mConnCleanupIndex]
if key in self.mConnStatus:
# Clean up if it's too old
if self.mConnStatus[key].mLastUpdate < (cur_time - CONNECTION_EXPIRY):
del self.mConnStatus[key]
#print "Cleaning up old key:", key
#print "num conns:", len(self.mConnStatus)
#print "num keys", len(self.mConnKeys)
else:
# Clean up if the connection is already removed
del self.mConnKeys[self.mConnCleanupIndex]
self.mConnCleanupIndex += 1
def getNextEvent(self):
# Get the next event out of the packet stream
td_format = 'IIII'
ip_format = '!BBHHHBBHII'
tcp_format = '!HHIIBBHHH'
while 1:
#
# Parse out an individual packet from the tcpdump stream
#
# Match the packet header
# Pull a record (packet) off of the wire
# Packet header
# guint32 ts_sec; /* timestamp seconds */
# guint32 ts_usec; /* timestamp microseconds */
# guint32 incl_len; /* number of octets of packet saved in file */
# guint32 orig_len; /* actual length of packet */
ph_bin = self.mInFile.read(16)
res = struct.unpack(td_format, ph_bin)
ts_sec = res[0]
ts_usec = res[1]
pkt_time = ts_sec + (ts_usec/1000000.0)
incl_len = res[2]
orig_len = res[3]
# Packet data (incl_len bytes)
raw_data = self.mInFile.read(incl_len)
# Parse out the MAC header
# Don't bother, we don't care - 14 byte header
mac_offset = 14
# Parse out the IP header (min 20 bytes)
# 4 bits - version
# 4 bits - header length in 32 bit words
# 1 byte - type of service
# 2 bytes - total length
# 2 bytes - fragment identification
# 3 bits - flags
# 13 bits - fragment offset
# 1 byte - TTL
# 1 byte - Protocol (should be 6)
# 2 bytes - header checksum
# 4 bytes - source IP
# 4 bytes - dest IP
ip_header = struct.unpack(ip_format, raw_data[mac_offset:mac_offset + 20])
# Assume all packets are TCP
#if ip_header[6] != 6:
# print "Not TCP!"
# continue
src_ip_bin = ip_header[8]
src_ip = lookup_ip_string(src_ip_bin)
#src_ip = "%d.%d.%d.%d" % ((src_ip_bin & 0xff000000L) >> 24,
# (src_ip_bin & 0x00ff0000L) >> 16,
# (src_ip_bin & 0x0000ff00L) >> 8,
# src_ip_bin & 0x000000ffL)
dst_ip_bin = ip_header[9]
dst_ip = lookup_ip_string(dst_ip_bin)
#dst_ip = "%d.%d.%d.%d" % ((dst_ip_bin & 0xff000000L) >> 24,
# (dst_ip_bin & 0x00ff0000L) >> 16,
# (dst_ip_bin & 0x0000ff00L) >> 8,
# dst_ip_bin & 0x000000ffL)
ip_size = (ip_header[0] & 0x0f) * 4
# Parse out the TCP packet header
# 2 bytes - src_prt
# 2 bytes - dst_port
# 4 bytes - sequence number
# 4 bytes - ack number
# 4 bits - data offset (size in 32 bit words of header
# 6 bits - reserved
# 6 bits - control bits
# 2 bytes - window
# 2 bytes - checksum
# 2 bytes - urgent pointer
tcp_offset = mac_offset + ip_size
tcp_header = struct.unpack(tcp_format, raw_data[tcp_offset:tcp_offset+20])
tcp_size = ((tcp_header[4] & 0xf0) >> 4) * 4
src_port = tcp_header[0]
dst_port = tcp_header[1]
# 3 bytes - packet length
# 1 byte - packet number
# 1 byte - command
# <n bytes> - args
pkt_offset = tcp_offset + tcp_size
if len(raw_data) == pkt_offset:
continue
# Clearly not a mysql packet if it's less than 5 bytes of data
if len(raw_data) - pkt_offset < 5:
continue
src_ip_port = "%s:%d" % (src_ip, src_port)
dst_ip_port = "%s:%d" % (dst_ip, dst_port)
if src_port == 3306:
#
# We are processing traffic from mysql server -> client
                # This primarily is used to time how long it takes for us
# to start receiving data to the client from the server.
#
mysql_arr = array.array('B', raw_data[pkt_offset])
result_type = ord(raw_data[pkt_offset])
# Get or create connection
conn = self.createConnection(dst_ip_port, pkt_time)
# Update the status of this connection, including query times on
# connections
if conn.updateResponse(pkt_time, result_type):
# Event: Initial query response
return "QueryResponse", conn.mLastQuery
continue
if dst_port == 3306:
#
# Processing a packet from the client to the server
#
                # HACK: garbage collect here because it's a convenient spot where we see a packet timestamp roughly once per event.
# Garbage collect connections
self.cleanupConnection(pkt_time)
# Pull out packet length from the header
mysql_arr = array.array('B', raw_data[pkt_offset:pkt_offset+5])
pkt_len = mysql_arr[0] + (long(mysql_arr[1]) << 8) + (long(mysql_arr[2]) << 16)
pkt_number = mysql_arr[3]
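                # e.g. a hypothetical header of 0x2a 0x00 0x00 0x03 gives
                # pkt_len = 42 (little-endian 3-byte length) and pkt_number = 3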
# Find the connection associated with this packet
# Get or create connection
conn = self.createConnection(src_ip_port, pkt_time)
#if conn.mLastMysqlPacketNumber != (pkt_number - 1):
# print "Prev:", conn.mLastMysqlPacketNumber, "Cur:", pkt_number
conn.mLastMysqlPacketNumber = pkt_number
cmd = mysql_arr[4]
# If we're not a command, do stuff
if cmd > 0x1c:
# Unfortunately, we can't trivially tell the difference between
# various non-command packets
# Assume that these are all AuthResponses for now.
conn.updateNonCommand(pkt_time, raw_data[pkt_offset:])
if "QuerySent" == conn.mCurState:
return ("QueryStart", conn.mLastQuery)
continue
query = None
if cmd == 1:
# Event: Quitting a connection
conn.quit(src_ip, src_port, pkt_time)
# This connection is closing, get rid of it
self.closeConnection(src_ip_port)
return ("Quit", conn.mLastQuery)
elif cmd == 3:
# Event: Starting a query
conn.queryStart(src_ip, src_port, pkt_time, raw_data, pkt_len, pkt_offset + 5)
# Only return an QueryStart if we have the whole query
if "QuerySent" == conn.mCurState:
return ("QueryStart", conn.mLastQuery)
else:
pass
IP_PORT_RE = re.compile("(\S+):(\d+)")
EVENT_RE = re.compile("(\S+)\t(\S+):(\d+)\t(\S+)\t(\S+)")
SECTION_RE = re.compile("\*{38}")
class LLLogQueryStream:
"Process a query stream dump to generate a query stream class"
"Process a raw tcpdump stream (in raw libpcap format)"
def __init__(self, lineiter):
self.mLineIter = lineiter
self.mStartTime = None
        #
        # A list of all outstanding "connections", and what they're doing.
        # This is necessary in order to get script timing and other information.
        #
        self.mConnStatus = {}
    def createConnection(self, client_ip_port, pkt_time):
        # Track the connection, creating a new one or returning the existing one.
        # getNextEvent() below relies on this; it mirrors
        # LLQueryStream.createConnection minus the key tracking used there.
        if not client_ip_port in self.mConnStatus:
            self.mConnStatus[client_ip_port] = LLConnStatus(client_ip_port, pkt_time)
        return self.mConnStatus[client_ip_port]
    def closeConnection(self, ip_port):
        if ip_port in self.mConnStatus:
            del self.mConnStatus[ip_port]
def getNextEvent(self):
# Get the next event out of the file
cur_event = None
event_time = None
event_type = None
ip = None
port = None
ip_port = None
cur_state = 'Metadata'
for line in self.mLineIter:
if line == '':
return (None, None)
if cur_state == 'Metadata':
# We're looking for an event. Actually we better find one.
m = EVENT_RE.match(line)
if not m:
#raise "Missing event on line: %s" % line
continue
else:
event_time = float(m.group(1))
ip = m.group(2)
port = int(m.group(3))
ip_port = m.group(2)+":"+m.group(3)
clean_host = m.group(4)
event_type = m.group(5)
query_str = ''
cur_state = 'Query'
elif cur_state == 'Query':
if not SECTION_RE.match(line):
query_str += line
else:
# We're done
# Generate the event to return
# Track the connection if we don't know about it yet.
conn = self.createConnection(ip_port, event_time)
if event_type == 'QueryStart':
conn.queryStartProcessed(ip, port, event_time, query_str)
return ("QueryStart", conn.mLastQuery)
elif event_type == 'QueryResponse':
# Update the status of this connection, including query times on
# connections
# Hack: Result type defaults to zero
if conn.updateResponse(event_time, 0):
# Event: Initial query response
return ("QueryResponse", conn.mLastQuery)
else:
# Skip responses which we don't have the start for
cur_state = 'Metadata'
elif event_type == 'Quit':
# Event: Quitting a connection
conn.quit(ip, port, event_time)
# This connection is closing, get rid of it
self.closeConnection(ip_port)
return ("Quit", conn.mLastQuery)
else:
raise ("Unknown event type %s" % event_type)
return (None, None)
def start_dump(host, port):
# Start up tcpdump pushing data into netcat on the sql server
interface = "eth0"
# Start up tcpdump pushing data into netcat on the sql server
SRC_DUMP_CMD = "ssh root@%s '/usr/sbin/tcpdump -p -n -s 0 -w - -i %s dst port 3306 or src port 3306 | nc %s %d'" \
% (host, interface, socket.getfqdn(), port)
os.popen2(SRC_DUMP_CMD, "r")
def remote_mysql_stream(host):
# Create a server socket, then have tcpdump dump stuff to it.
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bound = False
port = 9999
while not bound:
try:
serversocket.bind((socket.gethostname(), port))
bound = True
except:
print port, " already bound, trying again"
port += 1
print "Bound port %d" % port
serversocket.listen(1)
# Fork off the dumper, start the server on the main connection
pid = os.fork()
if not pid:
# Child process which gets data from the database
time.sleep(1.0)
print "Starting dump!"
start_dump(host, port)
print "Exiting dump!"
sys.exit(0)
print "Starting server"
(clientsocket, address) = serversocket.accept()
print "Accepted connection", address
# Start listening to the data stream
return clientsocket.makefile("rb")
#
# Utility stuff for query cleaner
#
# This is a Python port of (part of) the fingerprint() function from
# the mk-query-digest script in Maatkit, added by Yoz, with various additions/tweaks
hex_wildcard = r"[0-9a-f]"
word = hex_wildcard + r"{4}-"
long_word = hex_wildcard + r"{8}-"
very_long_word = hex_wildcard + r"{12}"
UUID_REGEX_STRING = long_word + word + word + word + very_long_word
hex_re = re.compile("^[\da-f]+$",re.I)
uuid_re = re.compile("^"+UUID_REGEX_STRING+"$",re.I)
def string_replace(match):
"Called by string-matching regexp in replacers"
if uuid_re.match(match.group(1)):
return "*uuid*"
return "*string*"
# list of (match,replacement) tuples used by clean_query()
replacers = [
# Disabling comment removal because we may put useful inspection info in there
#(re.compile(r'(?:--|#)[^\'"\r\n]*(?=[\r\n]|\Z)',re.I),""), # one-line comments
#(re.compile(r"/\*[^!].*?\*/",re.I|re.M|re.S),""), # But not /*!version */
(re.compile(r"\\\\"),""), # remove backslash pairs that may confuse the next line
(re.compile(r"\\[\"']"),""), # remove escaped quotes
(re.compile(r'"([^"]*)"',re.I),string_replace), # quoted strings
(re.compile(r"'([^']*)'",re.I),string_replace), # quoted strings
# this next one may need more work, due to "UPDATE ... SET money = money-23"
# the next two are significantly different from the maatkit original code
(re.compile(r"(?<![\w\)\d])(\s*)\-\d+(\.\d+)?",re.I),"*num*"), # negative reals
(re.compile(r"(?<![\w])\d+(\.\d+)?",re.I),"*num*"), # positive reals
# mk-query-digest has s/[xb.+-]\?/?/g; as "clean up leftovers" here, whatever that means - I've left it out
(re.compile(r"^\s+",re.I),""), # chop off leading whitespace
(re.compile(r"\s+$",re.I|re.M|re.S),""), # kill trailing whitespace
# reduce IN and VALUES lists (look for previously-cleaned placeholders)
(re.compile(r"\b(in|values)(?:[\s,]*\(([\s\,]*\*(num|string|uuid)\*)*[\s,]*\))+",
re.I|re.X),"\\1(*values*)"), # collapse IN and VALUES lists
# This next one collapses chains of UNIONed functionally-identical queries,
# but it's only really useful if you're regularly seeing more than 2 queries
# in a chain. We don't seem to have any like that, so I'm disabling this.
#(re.compile(r"\b(select\s.*?)(?:(\sunion(?:\sall)?)\s\1)+",re.I),"\\1 -- repeat\\2 --"), # collapse UNION
# remove "OFFSET *num*" when following a LIMIT
(re.compile(r"\blimit \*num\*(?:, ?\*num\*| offset \*num\*)?",re.I),"LIMIT *num*")
]
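# Example of the replacers' net effect on a hypothetical query:
#   clean_query("SELECT * FROM users WHERE id = 42 AND name = 'bob'", 0)
#   returns "SELECT * FROM users WHERE id = *num* AND name = *string*"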
prepare_re = re.compile('PREPARE.*', re.IGNORECASE)
deallocate_re = re.compile('DEALLOCATE\s+PREPARE.*', re.IGNORECASE)
execute_re = re.compile('EXECUTE.*', re.IGNORECASE)
mdb_re = re.compile('MDB2_STATEMENT\S+')
def clean_query(query, num_words):
"Generalizes a query by removing all unique information"
    # Strip newlines
query = query.replace("\n", " ")
# Screw it, if it's a prepared statement or an execute, generalize the statement name
if prepare_re.match(query):
query = mdb_re.sub('*statement*', query)
return query
if execute_re.match(query):
query = mdb_re.sub('*statement*', query)
if deallocate_re.match(query):
query = "DEALLOCATE PREPARE"
return query
# Loop through the replacers and perform each one
for (replacer, subst) in replacers:
# try block is here because, apparently, string_re may throw an exception
# TODO: investigate the above
try:
query = replacer.sub(subst, query)
except:
pass
# After we do the cleanup, then we get rid of extra whitespace
words = query.split(None)
query = " ".join(words)
return query
def test_clean_query(query):
"A debug version of the query cleaner which prints steps as it goes"
    # Strip newlines
query = query.replace("\n", " ")
# Screw it, if it's a prepared statement or an execute, generalize the statement name
if prepare_re.match(query):
query = mdb_re.sub('*statement*', query)
return query
if execute_re.match(query):
query = mdb_re.sub('*statement*', query)
if deallocate_re.match(query):
query = "DEALLOCATE PREPARE"
return query
# Loop through the replacers and perform each one
for (replacer, subst) in replacers:
try:
if replacer.search(query) == None:
print replacer.pattern," : No match"
else:
query = replacer.sub(subst, query)
print replacer.pattern," : ",query
except:
pass
# After we do the cleanup, then we get rid of extra whitespace
words = query.split(None)
query = " ".join(words)
return query
#
# Hostname cache - basically, caches the "linden" host type for a particular IP address
# or hostname
#
sim_re = re.compile(".*sim\d+.*")
web_re = re.compile("int\.web\d+.*")
iweb_re = re.compile("int\.iweb\d+.*")
webds_re = re.compile(".*web-ds\d+.*")
webster_re = re.compile(".*webster\d+.*")
bankds_re = re.compile(".*bank-ds\d+.*")
xmlrpc_re = re.compile(".*xmlrpc\d+.*")
login_re = re.compile(".*login\d+.*")
data_re = re.compile(".*data\..*")
#xmlrpc_re = re.compile("(?:int\.omgiwanna.*)|(?:int\.pony.*)")
ip_re = re.compile("\d+\.\d+\.\d+\.\d+")
ll_re = re.compile("(.*)\.lindenlab\.com")
host_type_cache = {}
def get_host_type(host):
"Returns the genericized linden host type from an IP address or hostname"
# if host in host_type_cache:
# return host_type_cache[host]
named_host = str(host)
if ip_re.match(host):
# Look up the hostname
try:
named_host = str(socket.gethostbyaddr(host)[0])
except:
pass
# Figure out generic host type
host_type = named_host
if sim_re.match(named_host):
host_type = "sim"
elif login_re.match(named_host):
host_type = "login"
elif webster_re.match(named_host):
host_type = "webster"
elif bankds_re.match(named_host):
host_type = "bank-ds"
elif web_re.match(named_host):
host_type = "web"
elif iweb_re.match(named_host):
host_type = "iweb"
elif webds_re.match(named_host):
host_type = "web-ds"
elif data_re.match(named_host):
host_type = "data"
elif xmlrpc_re.match(named_host):
host_type = "xmlrpc"
m = ll_re.match(host_type)
if m:
host_type = m.group(1)
host_type_cache[host] = host_type
return (host_type, named_host)
def LLLogIter(filenames):
"An iterator that iterates line by line over a series of files, even if they're compressed."
for f in filenames:
curr = open_log_file(f)
for line in curr:
yield line
def open_log_file(filename):
# Open the logfile (even if it's compressed)
if re.compile(".+\.gz").match(filename):
        # gzipped file, return a gzipped file object
return gzip.open(filename,"r")
else:
return open(filename, "r")
|
mit
| -3,236,365,569,771,306,500
| 37.127694
| 126
| 0.570226
| false
| 3.741405
| false
| false
| false
|
neeraj-kumar/nkpylib
|
nkmturk.py
|
1
|
5946
|
#!/usr/bin/env python
"""Mechanical Turk-related utilities, written by Neeraj Kumar.
Licensed under the 3-clause BSD License:
Copyright (c) 2013, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time
from pprint import pprint, pformat
OUTHTML_FRAME = '''<html>
<head><title>Rendered mturk template</title></head>
<body>
%s
</body>
</html>'''
def readcsv(fname):
"""Reads the CSV file given and returns a list of dicts"""
import csv
reader = csv.DictReader(open(fname))
ret = [row for row in reader]
return ret
def renderhtml(tmpl, data, rowspec=None):
"""Renders html from the given template and data (list of dicts).
The rowspec should be an expression involving i and r, which are
the row index, and a random float, resp. This will be eval'ed and
only if true will the row be output.
An empty or None rowspec outputs all rows.
"""
from random import random
import re
# convert template to a python-style template
var = re.compile(r'\${(.*?)}')
matches = var.split(tmpl)
s = ''
for i, m in enumerate(matches):
if i%2 == 0:
s += m
else:
s += '%%(%s)s' % (m)
# go through data
rows = []
for i, row in enumerate(data):
r = random()
if rowspec and not eval(rowspec, locals()): continue
rows.append(s % row)
out = OUTHTML_FRAME % ('\n'.join(rows))
return out
def demultiplex(row, nperhit):
"""Demultiplexes this dict and returns a list of dicts."""
import re
end = re.compile(r'_\d+$')
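    # Multiplexed keys look like (hypothetically) "Input.image_0" / "Answer.tag_0";
    # iteration i keeps only the fields suffixed with _i and strips that suffix.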
# de-multiplex data
ret = []
for i in range(nperhit):
# copy all data
d = dict(**row)
for k, v in sorted(d.items()):
# find input and output fields and delete them initially
if not k.startswith('Input.') and not k.startswith('Answer.'): continue
del d[k]
# rename to simplified keys
k = k.replace('Input.','').replace('Answer.','')
if end.search(k):
# if it's the current one, we want to add it back in
if k.endswith('_%d' % i):
k = k.rsplit('_', 1)[0]
else: continue # remove multiplexed keys
# add field back in
d[k] = v
ret.append(d)
return ret
def renderout(tmplfname, data, groupby, nperhit):
"""Renders mturk output"""
import web, web.template
from nkutils import partitionByFunc
from nkwebutils import NKStor, mystorify
# de-multiplex and simplify data
data = sum([demultiplex(row, nperhit) for row in data], [])
# group by common key
grouped, _ = partitionByFunc(data, lambda d: d[groupby])
results = []
Cls = NKStor
# build up list of results
for gkey, g in sorted(grouped.items()):
# build up a list of common keys for this result group
r = Cls(g[0])
for el in g:
for k, v in r.items():
if el[k] != v:
del r[k]
# now create each individual sub-output
r['outputs'] = [Cls(el) for el in g]
results.append(r)
#pprint(results)
# render results
renfunc = web.template.frender(tmplfname)
s = renfunc(results)
return s
if __name__ == '__main__':
from pprint import pprint
TASKS = 'renderhit renderout'.split(' ')
if len(sys.argv) < 2:
print 'Usage: python %s <%s> [<args> ...]' % (sys.argv[0], '|'.join(TASKS))
sys.exit()
task = sys.argv[1]
assert task in TASKS
if task == 'renderhit':
if len(sys.argv) < 4:
print 'Usage: python %s renderhit <template> <data csv> [<rowspec>]' % (sys.argv[0])
print " rowspec is an expression involving 'i' (index) and/or 'r' (random float) which is eval'ed"
sys.exit()
tmpl = open(sys.argv[2]).read()
data = readcsv(sys.argv[3])
try:
rowspec = sys.argv[4]
except Exception:
rowspec = None
html = renderhtml(tmpl, data, rowspec)
print html
elif task == 'renderout':
if len(sys.argv) < 5:
print 'Usage: python %s renderout <template> <data csv> <groupby> <nperhit>' % (sys.argv[0])
sys.exit()
tmplfname = sys.argv[2]
data = readcsv(sys.argv[3])
groupby = sys.argv[4]
nperhit = int(sys.argv[5])
html = renderout(tmplfname, data, groupby, nperhit)
print html
|
bsd-3-clause
| 2,662,317,386,595,247,600
| 35.478528
| 111
| 0.623276
| false
| 3.836129
| false
| false
| false
|
coolkang/hsbsite
|
settings.py
|
1
|
12906
|
from __future__ import absolute_import, unicode_literals
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost',]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"moderna_app", # This is a template I am using.
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.pages",
"mezzanine.galleries",
#"mezzanine.twitter",
#"mezzanine.accounts",
#"mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username for host deploying to
# "HOSTS": ALLOWED_HOSTS[:1], # List of hosts to deploy to (eg, first host)
# "DOMAINS": ALLOWED_HOSTS, # Domains for public site
# "REPO_URL": "ssh://hg@bitbucket.org/user/project", # Project's repo URL
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "requirements.txt", # Project's pip requirements
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
####################
# HSBSITE SETTINGS #
####################
SITE_TITLE = 'hbanner'
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
from local_settings import *
except ImportError as e:
if "local_settings" not in str(e):
raise e
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
apache-2.0
| 5,025,741,811,072,452,000
| 33.600536
| 79
| 0.685573
| false
| 3.607043
| false
| false
| false
|
unicefuganda/uSurvey
|
survey/views/indicators.py
|
1
|
15675
|
import json
import plotly.offline as opy
import plotly.graph_objs as go
from django.utils.safestring import mark_safe
from django.contrib import messages
from django.contrib.auth.decorators import permission_required, login_required
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from survey.models import Location
from survey.forms.indicator import IndicatorForm,\
IndicatorVariableForm, IndicatorFormulaeForm
from survey.forms.filters import IndicatorFilterForm
from survey.models import Indicator
from survey.models import IndicatorVariable
from survey.models import IndicatorVariableCriteria
from survey.models import Survey
from survey.forms.enumeration_area import LocationsFilterForm
INDICATOR_DOES_NOT_EXIST_MSG = "The indicator requested does not exist."
@login_required
@permission_required('auth.can_view_batches')
def new(request):
"""Creates new indicator. HTML uses with ajax to create variables on same screen with popups"""
indicator_form = IndicatorForm()
if request.method == 'POST':
indicator_form = IndicatorForm(data=request.POST)
if indicator_form.is_valid():
indicator_form.save()
messages.success(request, "Indicator successfully created.")
return HttpResponseRedirect(reverse('list_indicator_page'))
messages.error(request, "Indicator was not created.")
request.breadcrumbs([
('Indicators', reverse('list_indicator_page')),
])
return render(request,
'indicator/new.html',
{'indicator_form': indicator_form,
'title': 'Add Indicator',
'button_label': 'Create',
'cancel_url': reverse('list_indicator_page'),
'action': '/indicators/new/',
'variable_form': IndicatorVariableForm(None)})
@login_required
def edit(request, indicator_id):
indicator = Indicator.objects.get(id=indicator_id)
indicator_form = IndicatorForm(instance=indicator)
if request.method == 'POST':
indicator_form = IndicatorForm(data=request.POST, instance=indicator)
if indicator_form.is_valid():
indicator_form.save()
messages.success(request, "Indicator successfully edited.")
return HttpResponseRedirect("/indicators/")
messages.error(request, "Indicator was not successfully edited.")
request.breadcrumbs([
('Indicators', reverse('list_indicator_page')),
])
context = {
'indicator_form': indicator_form,
'title': 'Edit Indicator',
'button_label': 'Save',
'cancel_url': reverse('list_indicator_page'),
'variable_form': IndicatorVariableForm(None)}
return render(request, 'indicator/new.html', context)
def _process_form(indicator_filter_form, indicators):
if indicator_filter_form.is_valid():
survey_id = indicator_filter_form.cleaned_data['survey']
question_set_id = indicator_filter_form.cleaned_data['question_set'] # could
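        # A concrete question set filter takes precedence; otherwise fall back to
        # all question sets belonging to the selected survey.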
if question_set_id.isdigit():
indicators = indicators.filter(question_set__id=question_set_id)
elif survey_id.isdigit():
qsets = Survey.objects.get(id=survey_id).qsets.values_list('id', flat=True)
indicators = indicators.filter(question_set__id__in=qsets)
return indicators
@login_required
@permission_required('auth.can_view_batches')
def index(request):
indicators = Indicator.objects.all()
data = request.GET or request.POST
indicator_filter_form = IndicatorFilterForm(data=data)
indicators = _process_form(indicator_filter_form, indicators)
return render(request,
'indicator/index.html',
{'indicators': indicators,
'indicator_filter_form': indicator_filter_form})
@login_required
@permission_required('auth.can_view_batches')
def delete(request, indicator_id):
indicator = Indicator.objects.get(id=indicator_id)
indicator.variables.all().delete()
indicator.delete()
messages.success(request, 'Indicator successfully deleted.')
return HttpResponseRedirect('/indicators/')
def validate_formulae(request):
request_data = request.GET if request.method == 'GET' else request.POST
return JsonResponse({'valid': IndicatorFormulaeForm(data=request_data).is_valid()})
@login_required
@permission_required('auth.can_view_household_groups')
def add_indicator_variable(request, indicator_id):
indicator = Indicator.get(pk=indicator_id)
request.breadcrumbs([
('Indicators', reverse('list_indicator_page')),
(
'Variable List',
reverse(
'view_indicator_variables',
                args=(indicator_id, )))
])
return _add_variable(request, indicator=indicator)
def _add_variable(request, indicator=None):
form_action = reverse('add_variable')
parameter_questions = []
if indicator:
form_action = reverse("add_indicator_variable", args=(indicator.id, ))
parameter_questions = indicator.eqset.all_questions
variable_form = IndicatorVariableForm(indicator)
if request.method == 'POST':
variable_form = IndicatorVariableForm(indicator, data=request.POST)
if variable_form.is_valid():
variable = variable_form.save()
if request.is_ajax() is False:
messages.success(request, 'Variable successfully saved.')
return HttpResponseRedirect(
reverse('edit_indicator_variable',
args=(
variable.pk,
)))
context = {'variable_form': variable_form,
'indicator': indicator,
'title': "Manage Indicator Criteria",
'button_label': 'Save',
'id': 'add_group_form',
"v_form_action": form_action,
'cancel_url': reverse('list_indicator_page'),
'parameter_questions': parameter_questions,
'condition_title': "Conditions"}
if request.is_ajax():
context['cancel_url'] = None
return render(request, 'indicator/indicator_form.html', context)
return render(request, 'indicator/indicator_variable.html', context)
def add_variable(request):
return _add_variable(request)
def ajax_edit_indicator_variable(request):
data = request.GET or request.POST
if request.is_ajax():
variable_id = data.get('id')
return edit_indicator_variable(request, variable_id)
@login_required
@permission_required('auth.can_view_household_groups')
def edit_indicator_variable(request, variable_id):
variable = IndicatorVariable.get(id=variable_id)
variable_form = IndicatorVariableForm(
variable.indicator, instance=variable)
parameter_questions = []
if variable.indicator:
parameter_questions = variable.indicator.eqset.all_questions
if request.method == 'POST':
variable_form = IndicatorVariableForm(
variable.indicator, instance=variable, data=request.POST)
if variable_form.is_valid():
variable_form.save()
if request.is_ajax() is False:
messages.success(request, 'Variable successfully saved.')
return HttpResponseRedirect(
reverse(
'edit_indicator_variable',
args=(
variable.pk,
)))
context = {
'variable_form': variable_form,
'indicator': variable.indicator,
'title': "Manage Indicator Criteria",
'button_label': 'Save',
'id': 'add_group_form',
"v_form_action": reverse(
"edit_indicator_variable",
args=(
variable_id,
)),
'cancel_url': reverse('list_indicator_page'),
'parameter_questions': parameter_questions,
'conditions': variable.criteria.all(),
'condition_title': "Conditions"}
if request.is_ajax():
context['cancel_url'] = None
return render(request, 'indicator/indicator_form.html', context)
breadcrumbs = [
('Indicators', reverse('list_indicator_page')),
]
if variable.indicator:
breadcrumbs.append(
('Variable List', reverse(
'view_indicator_variables', args=(
variable.indicator.pk, ))))
request.breadcrumbs(breadcrumbs)
return render(request, 'indicator/indicator_variable.html', context)
@login_required
@permission_required('auth.can_view_household_groups')
def delete_indicator_variable(request, variable_id):
get_object_or_404(IndicatorVariable, id=variable_id).delete()
if request.is_ajax():
return add_variable(request)
messages.info(request, 'Variable removed successfully')
return HttpResponseRedirect(reverse('list_indicator_page'))
@login_required
@permission_required('auth.can_view_household_groups')
def ajax_delete_indicator_variable(request):
if request.is_ajax():
variable_id = request.GET.get('id')
return delete_indicator_variable(request, variable_id)
@login_required
@permission_required('auth.can_view_household_groups')
def delete_indicator_criteria(request, indicator_criteria_id):
criterion = get_object_or_404(
IndicatorVariableCriteria,
id=indicator_criteria_id)
variable = criterion.variable
criterion.delete()
if request.is_ajax() is False:
messages.info(request, 'condition removed successfully')
return HttpResponseRedirect(
reverse(
'edit_indicator_variable',
args=(
variable.pk,
)))
def view_indicator_variables(request, indicator_id):
indicator = get_object_or_404(Indicator, id=indicator_id)
request.breadcrumbs([
('Indicators', reverse('list_indicator_page')),
])
context = {'indicator': indicator, 'variables': indicator.variables.all()}
return render(request, 'indicator/indicator_variable_list.html', context)
@login_required
def variables(request):
    # return the variable names for the given indicator, or for the selected variable ids
if request.GET.get('id', None):
indicator = Indicator.get(pk=request.GET.get('id', None))
response = list(indicator.variables.values_list('name', flat=True))
else:
var_ids = request.GET.getlist('var_id[]')
response = list(
IndicatorVariable.objects.filter(
id__in=var_ids).values_list(
'name',
flat=True))
return JsonResponse(response, safe=False)
@login_required
@permission_required('auth.can_view_batches')
def indicator_formula(request, indicator_id):
try:
indicator = Indicator.get(id=indicator_id)
except Indicator.DoesNotExist:
messages.error(request, INDICATOR_DOES_NOT_EXIST_MSG)
return HttpResponseRedirect(reverse('list_indicator_page'))
if request.method == 'POST':
formulae_form = IndicatorFormulaeForm(instance=indicator, data=request.POST)
if formulae_form.is_valid():
formulae_form.save()
messages.info(request, 'Formulae has been saved!')
return HttpResponseRedirect(reverse('list_indicator_page'))
else:
formulae_form = IndicatorFormulaeForm(instance=indicator)
request.breadcrumbs([
('Indicator List', reverse('list_indicator_page')),
])
context = {
'indicator_form': formulae_form,
'title': 'Indicator Formulae',
'button_label': 'Save',
'indicator': indicator,
'cancel_url': reverse('list_indicator_page')}
return render(request, 'indicator/formulae.html', context)
def _retrieve_data_frame(request, indicator_id):
selected_location = Location.objects.get(parent__isnull=True)
params = request.GET or request.POST
locations_filter = LocationsFilterForm(data=params)
first_level_location_analyzed = Location.objects.filter(
type__name__iexact="country")[0]
indicator = Indicator.objects.get(id=indicator_id)
last_selected_loc = locations_filter.last_location_selected
if last_selected_loc:
selected_location = last_selected_loc
report_locations = selected_location.get_children().order_by('name')
context = {'request': request, 'indicator': indicator,
'locations_filter': locations_filter,
'selected_location': selected_location,
'report_locations': report_locations
}
return context, indicator.get_data(selected_location, report_level=selected_location.level+1)
@permission_required('auth.can_view_batches')
def simple_indicator(request, indicator_id):
request.breadcrumbs([
('Indicator List', reverse('list_indicator_page')),
])
context, reports_df = _retrieve_data_frame(request, indicator_id)
indicator = context['indicator']
# hence set the location where the report is based. i.e the child current
# selected location.
context['report'] = mark_safe(
reports_df.to_html(
na_rep='-',
classes='table table-striped\
table-bordered dataTable table-hover table-sort'))
variable_names = indicator.active_variables()
report_locations = context['report_locations']
def make_hover_text(row):
return '<br />'.join(['%s: %d' % (name, row[name])
for name in variable_names if str(row[name]).isdigit()])
reports_df['hover-text'] = reports_df.apply(make_hover_text, axis=1)
if report_locations:
trace1 = go.Bar(x=reports_df.index,
y=reports_df[indicator.REPORT_FIELD_NAME], x0=0, y0=0,
name=indicator.name,
text=reports_df['hover-text'],)
data = go.Data([trace1])
margin = go.Margin(pad=15)
layout = go.Layout(
title=indicator.name,
xaxis={'title': report_locations[0].type.name},
yaxis={'title': 'Values per %s' % report_locations[0].type.name},
margin=margin,
annotations=[
dict(
x=xi,
y=yi,
text=str(yi),
xanchor='center',
yanchor='bottom',
showarrow=False,
) for xi, yi in zip(
reports_df.index,
reports_df[indicator.REPORT_FIELD_NAME])]
)
figure = go.Figure(data=data, layout=layout)
graph_div = opy.plot(
figure,
auto_open=False,
output_type='div',
show_link=False)
context['graph'] = mark_safe(graph_div)
return render(request, 'indicator/simple_indicator.html', context)
@login_required
@permission_required('auth.can_view_batches')
def download_indicator_analysis(request, indicator_id):
context, reports_df = _retrieve_data_frame(request, indicator_id)
last_selected_loc = context['selected_location']
indicator = context['indicator']
file_name = '%s%s' % ('%s-%s-' % (
last_selected_loc.type.name,
last_selected_loc.name) if last_selected_loc else '',
indicator.name)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;\
filename="%s.csv"' % file_name
reports_df.to_csv(
response,
date_format='%Y-%m-%d %H:%M:%S',
encoding='utf-8') # exclude interview id
return response
|
bsd-3-clause
| -3,426,124,051,870,418,000
| 37.703704
| 99
| 0.637321
| false
| 4.099111
| false
| false
| false
|
dpgaspar/Flask-AppBuilder
|
examples/quickimages/config.py
|
1
|
1704
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h"
OPENID_PROVIDERS = [
{"name": "Google", "url": "https://www.google.com/accounts/o8/id"},
{"name": "Yahoo", "url": "https://me.yahoo.com"},
{"name": "AOL", "url": "http://openid.aol.com/<username>"},
{"name": "Flickr", "url": "http://www.flickr.com/<username>"},
{"name": "MyOpenID", "url": "https://www.myopenid.com"},
]
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db")
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_FOLDER = "translations"
LANGUAGES = {
"en": {"flag": "gb", "name": "English"},
"pt": {"flag": "pt", "name": "Portuguese"},
"es": {"flag": "es", "name": "Spanish"},
"de": {"flag": "de", "name": "German"},
"zh": {"flag": "cn", "name": "Chinese"},
"ru": {"flag": "ru", "name": "Russian"},
}
# ------------------------------
# GLOBALS FOR GENERAL APP's
# ------------------------------
UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_URL = "/static/uploads/"
IMG_SIZE = (150, 150, True)
AUTH_TYPE = 1
AUTH_ROLE_ADMIN = "Admin"
AUTH_ROLE_PUBLIC = "Public"
APP_NAME = "F.A.B. Example"
APP_THEME = "" # default
# APP_THEME = "cerulean.css" # COOL
# APP_THEME = "amelia.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css" # COOL
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "readable.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css" # COOL
# APP_THEME = "spacelab.css" # NICE
# APP_THEME = "united.css"
|
bsd-3-clause
| 6,128,436,733,377,320,000
| 31.150943
| 72
| 0.572183
| false
| 2.609495
| false
| false
| false
|
ClearCorp-dev/odoo-costa-rica
|
l10n_cr_hr_payroll/hr_employee.py
|
1
|
2087
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class hr_employee(models.Model):
_inherit = 'hr.employee'
def _check_report_number_child(self, cr, uid, ids, context=None):
for employee in self.browse(cr, uid, ids, context=context):
if employee.report_number_child < 0:
return False
return True
@api.onchange('marital')
def _onchange_marital(self):
self.report_spouse = False
    marital = fields.Selection([('single', 'Single'), ('married', 'Married'), ('widower', 'Widower'), ('divorced', 'Divorced')], string='Marital')
    report_spouse = fields.Boolean('Report Spouse', help="If this employee reports his spouse for rent payment")
    report_number_child = fields.Integer('Number of children to report', help="Number of children to report for rent payment")
_defaults = {
'report_number_child': 0,
}
_constraints = [
(_check_report_number_child, 'Error! The number of child to report must be greater or equal to zero.', ['report_number_child'])
]
|
agpl-3.0
| 1,186,865,629,129,506,300
| 40.76
| 147
| 0.624341
| false
| 4.044574
| false
| false
| false
|
dahiro/shotgun-replica
|
shotgun_replica/python/tests/shotgun_replica_tests/sync/local_to_shotgun/test_entities_field_change.py
|
1
|
4097
|
'''
Created on Nov 15, 2012
@author: bach
'''
import unittest
import tests_elefant
from shotgun_replica import factories, entities
from tests_elefant import commanda
from shotgun_replica.sync import local_to_shotgun, shotgun_to_local
from shotgun_replica.utilities import entityNaming, debug
class Test( unittest.TestCase ):
local2shotgun = None
testassetlibrary = None
task = None
testasset = None
linkedAsset = None
def setUp( self ):
self.local2shotgun = local_to_shotgun.LocalDBEventSpooler()
self.shotgun2local = shotgun_to_local.EventSpooler()
self.testassetlibrary = factories.getObject( entities.AssetLibrary().getType(),
remote_id = commanda.TEST_ASSETLIBRARY_ID )
self.task = factories.getObject( "Task", remote_id = tests_elefant.testTaskID )
self.testasset = tests_elefant.createTestAsset( self.testassetlibrary )
debug.debug( self.testasset.getLocalID() )
self.linkedAsset = tests_elefant.createTestAsset( self.testassetlibrary )
debug.debug( self.linkedAsset.getLocalID() )
def tearDown( self ):
self.testasset.delete()
self.linkedAsset.delete()
self.assertTrue( self.local2shotgun.connectAndRun(), "synch not successful" )
self.assertTrue( self.shotgun2local.connectAndRun(), "synch not successful" )
def testLinkedAsset( self ):
self.testasset.assets = [ self.linkedAsset ]
self.testasset.save()
# get connection objects from source
connObj = factories.getConnectionObj( baseObj = self.testasset,
linkedObj = self.linkedAsset,
attribute = "assets" )
self.assertNotEqual( connObj, None )
# TODO: synch and check if not two connObj
#
self.assertTrue( self.local2shotgun.connectAndRun(), "synch not successful" )
connObj = factories.getConnectionObj( baseObj = self.testasset,
linkedObj = self.linkedAsset,
attribute = "assets" )
self.assertNotEqual( type( connObj ), list, "multiple connection objects after synch" )
# get attribute of reverse field
reverseAttrName = entityNaming.getReverseAttributeName( "Asset", "assets" )
linkedAsset = factories.getObject( "Asset", local_id = self.linkedAsset.getLocalID() )
retLinks = linkedAsset.getField( reverseAttrName )
self.assertTrue( retLinks != None and self.testasset in retLinks )
# checking sync from shotgun to local
self.assertTrue( self.shotgun2local.connectAndRun(), "synch not successful" )
connObj = factories.getConnectionObj( baseObj = self.testasset,
linkedObj = self.linkedAsset,
attribute = "assets" )
self.assertNotEqual( type( connObj ), list, "multiple connection objects after synch" )
# remove connection
self.testasset.assets = [ ]
self.testasset.save()
connObj = factories.getConnectionObj( baseObj = self.testasset,
linkedObj = self.linkedAsset,
attribute = "assets" )
self.assertEqual( connObj, None )
linkedAsset = factories.getObject( "Asset", local_id = self.linkedAsset.getLocalID() )
retLinks = linkedAsset.getField( reverseAttrName )
self.assertEqual( retLinks, [] )
self.assertTrue( self.local2shotgun.connectAndRun(), "synch not successful" )
connObj = factories.getConnectionObj( baseObj = self.testasset,
linkedObj = self.linkedAsset,
attribute = "assets" )
self.assertEqual( connObj, None )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testLinkedAsset']
unittest.main()
|
bsd-3-clause
| -1,195,573,157,826,296,600
| 39.97
| 96
| 0.604833
| false
| 4.344645
| true
| false
| false
|
eckardm/archivematica
|
src/MCPClient/lib/clientScripts/archivematicaMoveSIP.py
|
1
|
2124
|
#!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>
import os
import shutil
import sys
import django
django.setup()
# dashboard
from main.models import SIP
# archivematicaCommon
from custom_handlers import get_script_logger
from fileOperations import renameAsSudo
def updateDB(dst, sip_uuid):
SIP.objects.filter(uuid=sip_uuid).update(currentpath=dst)
def moveSIP(src, dst, sipUUID, sharedDirectoryPath):
# Prepare paths
if src.endswith("/"):
src = src[:-1]
dest = dst.replace(sharedDirectoryPath, "%sharedPath%", 1)
if dest.endswith("/"):
dest = os.path.join(dest, os.path.basename(src))
if dest.endswith("/."):
dest = os.path.join(dest[:-1], os.path.basename(src))
updateDB(dest + "/", sipUUID)
# If destination already exists, delete it with warning
dest_path = os.path.join(dst, os.path.basename(src))
if os.path.exists(dest_path):
print >>sys.stderr, dest_path, 'exists, deleting'
shutil.rmtree(dest_path)
renameAsSudo(src, dst)
if __name__ == '__main__':
logger = get_script_logger("archivematica.mcp.client.moveSIP")
src = sys.argv[1]
dst = sys.argv[2]
sipUUID = sys.argv[3]
sharedDirectoryPath = sys.argv[4]
moveSIP(src, dst, sipUUID, sharedDirectoryPath)
|
agpl-3.0
| 5,462,322,742,759,084,000
| 31.181818
| 77
| 0.713277
| false
| 3.436893
| false
| false
| false
|
doc-E-brown/FacialLandmarkingReview
|
experiments/Sec4_ModelDefinition/muctAAM.py
|
1
|
2082
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# S.D.G
"""AAM test for MUCT dataset
:author: Ben Johnston
:license: 3-Clause BSD
"""
# Imports
import os
import menpo.io as mio
from aam import AAM
from menpofit.aam import HolisticAAM, PatchAAM
from sklearn.model_selection import train_test_split
MUCT_DATA_FOLDER = os.getenv('MUCT_DATA', '~/datasets/muct')
class MuctAAM(AAM):
""" MUCT AAM class """
def __init__(self, path_to_data=MUCT_DATA_FOLDER,
model_type=HolisticAAM, basename='muct_aam', verbose=True):
super(MuctAAM, self).__init__(
path_to_data, model_type, basename, verbose)
def load_data(self, crop_percentage=0.1,
test_set_ratio=0.3, max_images=None):
""" Load the images and landmarks in an menpo.io
format and crop the images using the specified
landmarks as a guide
Parameters
---------
"""
images = []
for i in mio.import_images(
self.filepath, max_images=max_images, verbose=self.verbose):
if i.landmarks['PTS'].lms.points.shape[0] != 76:
continue
i = i.crop_to_landmarks_proportion(crop_percentage)
# Convert to grayscale if required
if i.n_channels == 3:
i = i.as_greyscale() # Default to luminosity
images.append(i)
# Split into training and test sets
self.train_set, self.test_set =\
train_test_split(images, test_size=test_set_ratio, random_state=42)
def _crop_grayscale_images(self, filepath, crop_percentage):
images = []
for i in mio.import_images(
filepath, max_images=None, verbose=self.verbose):
i = i.crop_to_landmarks_proportion(crop_percentage)
# Convert to grayscale if required
if i.n_channels == 3:
i = i.as_greyscale() # Default to luminosity
# Due to large training set size use generators for better memory
# efficiency
yield i
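# Hedged usage sketch (added for illustration, not part of the original
# module): assumes MUCT_DATA points at a local copy of the MUCT dataset and
# only exercises the methods defined above.
def _example_build_muct_aam():
    muct_aam = MuctAAM(model_type=PatchAAM, verbose=False)
    muct_aam.load_data(crop_percentage=0.1, test_set_ratio=0.3, max_images=50)
    return muct_aam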
|
gpl-3.0
| -6,800,944,591,862,461,000
| 26.76
| 79
| 0.587896
| false
| 3.608319
| true
| false
| false
|
whiteclover/Choco
|
choco/runtime.py
|
1
|
28308
|
# choco/runtime.py
# Copyright (C) 2006-2016 the Choco authors and contributors <see AUTHORS file>
#
# This module is part of Choco and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context,
Namespace, and various helper functions."""
from choco import errors, util, compat
from choco.compat import compat_builtins
import sys
class Context(object):
"""Provides runtime namespace, output buffer, and various
callstacks for templates.
See :ref:`runtime_toplevel` for detail on the usage of
:class:`.Context`.
"""
def __init__(self, buffer, **data):
self._buffer_stack = [buffer]
self._data = data
self._kwargs = data.copy()
self._with_template = None
self._outputting_as_unicode = None
self.namespaces = {}
# "capture" function which proxies to the
# generic "capture" function
self._data['capture'] = compat.partial(capture, self)
# "caller" stack used by def calls with content
self.caller_stack = self._data['caller'] = CallerStack()
def _set_with_template(self, t):
self._with_template = t
illegal_names = t.reserved_names.intersection(self._data)
if illegal_names:
raise errors.NameConflictError(
"Reserved words passed to render(): %s" %
", ".join(illegal_names))
@property
def lookup(self):
"""Return the :class:`.TemplateLookup` associated
with this :class:`.Context`.
"""
return self._with_template.lookup
@property
def kwargs(self):
"""Return the dictionary of top level keyword arguments associated
with this :class:`.Context`.
This dictionary only includes the top-level arguments passed to
:meth:`.Template.render`. It does not include names produced within
the template execution such as local variable names or special names
such as ``self``, ``next``, etc.
The purpose of this dictionary is primarily for the case that
a :class:`.Template` accepts arguments via its ``<%page>`` tag,
which are normally expected to be passed via :meth:`.Template.render`,
except the template is being called in an inheritance context,
using the ``body()`` method. :attr:`.Context.kwargs` can then be
used to propagate these arguments to the inheriting template::
${next.body(**context.kwargs)}
"""
return self._kwargs.copy()
def push_caller(self, caller):
"""Push a ``caller`` callable onto the callstack for
this :class:`.Context`."""
self.caller_stack.append(caller)
def pop_caller(self):
"""Pop a ``caller`` callable onto the callstack for this
:class:`.Context`."""
del self.caller_stack[-1]
def keys(self):
"""Return a list of all names established in this :class:`.Context`."""
return list(self._data.keys())
def __getitem__(self, key):
if key in self._data:
return self._data[key]
else:
return compat_builtins.__dict__[key]
def _push_writer(self):
"""push a capturing buffer onto this Context and return
the new writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
def _push_buffer(self):
"""push a capturing buffer onto this Context."""
self._push_writer()
def _pop_buffer(self):
"""pop the most recent capturing buffer from this Context."""
return self._buffer_stack.pop()
def get(self, key, default=None):
"""Return a value from this :class:`.Context`."""
return self._data.get(key, compat_builtins.__dict__.get(key, default))
def write(self, string):
"""Write a string to this :class:`.Context` object's
underlying output buffer."""
self._buffer_stack[-1].write(string)
def writer(self):
"""Return the current writer function."""
return self._buffer_stack[-1].write
def _copy(self):
c = Context.__new__(Context)
c._buffer_stack = self._buffer_stack
c._data = self._data.copy()
c._kwargs = self._kwargs
c._with_template = self._with_template
c._outputting_as_unicode = self._outputting_as_unicode
c.namespaces = self.namespaces
c.caller_stack = self.caller_stack
return c
def _locals(self, d):
"""Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
"""
if not d:
return self
c = self._copy()
c._data.update(d)
return c
def _clean_inheritance_tokens(self):
"""create a new copy of this :class:`.Context`. with
tokens related to inheritance state removed."""
c = self._copy()
x = c._data
x.pop('self', None)
x.pop('parent', None)
x.pop('next', None)
return c
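# Hedged sketch (illustrative only, not part of the original module): the
# render pipeline below normally builds the Context; this shows the same
# construction by hand with an arbitrary demo key ``greeting``.
def _example_context_usage():
    buf = util.FastEncodingBuffer()
    ctx = Context(buf, greeting='hello')
    ctx.write(ctx.get('greeting', ''))  # goes through the context's buffer stack
    return buf.getvalue()               # -> 'hello'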
class CallerStack(list):
def __init__(self):
self.nextcaller = None
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return len(self) and self._get_caller() and True or False
def _get_caller(self):
# this method can be removed once
# codegen MAGIC_NUMBER moves past 7
return self[-1]
def __getattr__(self, key):
return getattr(self._get_caller(), key)
def _push_frame(self):
frame = self.nextcaller or None
self.append(frame)
self.nextcaller = None
return frame
def _pop_frame(self):
self.nextcaller = self.pop()
class Undefined(object):
"""Represents an undefined value in a template.
All template modules have a constant value
``UNDEFINED`` present which is an instance of this
object.
"""
def __str__(self):
raise NameError("Undefined")
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
UNDEFINED = Undefined()
STOP_RENDERING = ""
class LoopStack(object):
"""a stack for LoopContexts that implements the context manager protocol
to automatically pop off the top of the stack on context exit
"""
def __init__(self):
self.stack = []
def _enter(self, iterable):
self._push(iterable)
return self._top
def _exit(self):
self._pop()
return self._top
@property
def _top(self):
if self.stack:
return self.stack[-1]
else:
return self
def _pop(self):
return self.stack.pop()
def _push(self, iterable):
new = LoopContext(iterable)
if self.stack:
new.parent = self.stack[-1]
return self.stack.append(new)
def __getattr__(self, key):
raise errors.RuntimeException("No loop context is established")
def __iter__(self):
return iter(self._top)
class LoopContext(object):
"""A magic loop variable.
Automatically accessible in any ``% for`` block.
See the section :ref:`loop_context` for usage
notes.
:attr:`parent` -> :class:`.LoopContext` or ``None``
The parent loop, if one exists.
:attr:`index` -> `int`
The 0-based iteration count.
:attr:`reverse_index` -> `int`
The number of iterations remaining.
:attr:`first` -> `bool`
``True`` on the first iteration, ``False`` otherwise.
:attr:`last` -> `bool`
``True`` on the last iteration, ``False`` otherwise.
:attr:`even` -> `bool`
``True`` when ``index`` is even.
:attr:`odd` -> `bool`
``True`` when ``index`` is odd.
"""
def __init__(self, iterable):
self._iterable = iterable
self.index = 0
self.parent = None
def __iter__(self):
for i in self._iterable:
yield i
self.index += 1
@util.memoized_instancemethod
def __len__(self):
return len(self._iterable)
@property
def reverse_index(self):
return len(self) - self.index - 1
@property
def first(self):
return self.index == 0
@property
def last(self):
return self.index == len(self) - 1
@property
def even(self):
return not self.odd
@property
def odd(self):
return bool(self.index % 2)
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
return values[self.index % len(values)]
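# Hedged sketch (illustrative only): LoopContext instances are normally
# created by LoopStack for ``% for`` blocks, but the attributes documented
# above can be exercised directly.
def _example_loop_context():
    loop = LoopContext(['a', 'b', 'c'])
    rows = []
    for item in loop:
        # index/first/last/cycle behave as described in the class docstring
        rows.append((loop.index, item, loop.first, loop.cycle('even', 'odd')))
    return rows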
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
"""Provides access to collections of rendering methods, which
can be local, from other templates, or from imported modules.
To access a particular rendering method referenced by a
:class:`.Namespace`, use plain attribute access:
.. sourcecode:: choco
${some_namespace.foo(x, y, z)}
:class:`.Namespace` also contains several built-in attributes
described here.
"""
def __init__(self, name, context,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
callables = ()
module = None
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
template = None
"""The :class:`.Template` object referenced by this
:class:`.Namespace`, if any.
"""
context = None
"""The :class:`.Context` object for this :class:`.Namespace`.
Namespaces are often created with copies of contexts that
contain slightly different data, particularly in inheritance
scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
can traverse an entire chain of templates that inherit from
one-another.
"""
filename = None
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
If this is a pure module-based
:class:`.Namespace`, this evaluates to ``module.__file__``. If a
template-based namespace, it evaluates to the original
template file location.
"""
uri = None
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
_templateuri = None
@util.memoized_property
def attr(self):
"""Access module level attributes by name.
This accessor allows templates to supply "scalar"
attributes which are particularly handy in inheritance
relationships.
.. seealso::
:ref:`inheritance_attr`
:ref:`namespace_attr_for_includes`
"""
return _NSAttr(self)
def get_namespace(self, uri):
"""Return a :class:`.Namespace` corresponding to the given ``uri``.
If the given ``uri`` is a relative URI (i.e. it does not
contain a leading slash ``/``), the ``uri`` is adjusted to
be relative to the ``uri`` of the namespace itself. This
method is therefore mostly useful off of the built-in
``local`` namespace, described in :ref:`namespace_local`.
        In most cases, a template wouldn't need this function, and
should instead use the ``<%namespace>`` tag to load
namespaces. However, since all ``<%namespace>`` tags are
evaluated before the body of a template ever runs,
this method can be used to locate namespaces using
expressions that were generated within the body code of
the template, or to conditionally use a particular
namespace.
"""
key = (self, uri)
if key in self.context.namespaces:
return self.context.namespaces[key]
else:
ns = TemplateNamespace(uri, self.context._copy(),
templateuri=uri,
calling_uri=self._templateuri)
self.context.namespaces[key] = ns
return ns
def get_template(self, uri):
"""Return a :class:`.Template` from the given ``uri``.
The ``uri`` resolution is relative to the ``uri`` of this
:class:`.Namespace` object's :class:`.Template`.
"""
return _lookup_template(self.context, uri, self._templateuri)
def get_cached(self, key, **kwargs):
"""Return a value from the :class:`.Cache` referenced by this
:class:`.Namespace` object's :class:`.Template`.
The advantage to this method versus direct access to the
:class:`.Cache` is that the configuration parameters
declared in ``<%page>`` take effect here, thereby calling
up the same configured backend as that configured
by ``<%page>``.
"""
return self.cache.get(key, **kwargs)
@property
def cache(self):
"""Return the :class:`.Cache` object referenced
by this :class:`.Namespace` object's
:class:`.Template`.
"""
return self.template.cache
def include_file(self, uri, **kwargs):
"""Include a file at the given ``uri``."""
_include_file(self.context, uri, self._templateuri, **kwargs)
def _populate(self, d, l):
for ident in l:
if ident == '*':
for (k, v) in self._get_star():
d[k] = v
else:
d[ident] = getattr(self, ident)
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
class TemplateNamespace(Namespace):
"""A :class:`.Namespace` specific to a :class:`.Template` instance."""
def __init__(self, name, context, template=None, templateuri=None,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
if templateuri is not None:
self.template = _lookup_template(context, templateuri,
calling_uri)
self._templateuri = self.template.module._template_uri
elif template is not None:
self.template = template
self._templateuri = template.module._template_uri
else:
raise TypeError("'template' argument is required.")
if populate_self:
lclcallable, lclcontext = \
_populate_self_namespace(context, self.template,
self_ns=self)
@property
def module(self):
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
return self.template.module
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.template.filename
@property
def uri(self):
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
return self.template.uri
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def get(key):
callable_ = self.template._get_def_callable(key)
return compat.partial(callable_, self.context)
for k in self.template.module._exports:
yield (k, get(k))
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.template.has_def(key):
callable_ = self.template._get_def_callable(key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
class ModuleNamespace(Namespace):
"""A :class:`.Namespace` specific to a Python module instance."""
def __init__(self, name, context, module,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
mod = __import__(module)
for token in module.split('.')[1:]:
mod = getattr(mod, token)
self.module = mod
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.module.__file__
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
for key in dir(self.module):
if key[0] != '_':
callable_ = getattr(self.module, key)
if compat.callable(callable_):
yield key, compat.partial(callable_, self.context)
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif hasattr(self.module, key):
callable_ = getattr(self.module, key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
def supports_caller(func):
"""Apply a caller_stack compatibility decorator to a plain
Python function.
See the example in :ref:`namespaces_python_modules`.
"""
def wrap_stackframe(context, *args, **kwargs):
context.caller_stack._push_frame()
try:
return func(context, *args, **kwargs)
finally:
context.caller_stack._pop_frame()
return wrap_stackframe
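# Hedged sketch (illustrative only): a module-level def made usable from
# templates with a caller body, in the style referenced by the docstring
# above; the name and markup are arbitrary.
@supports_caller
def _example_supported_def(context, **kwargs):
    context.write('<div class="frame">')
    context['caller'].body()   # render the calling template's body
    context.write('</div>')
    return ''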
def capture(context, callable_, *args, **kwargs):
"""Execute the given template def, capturing the output into
a buffer.
See the example in :ref:`namespaces_python_modules`.
"""
if not compat.callable(callable_):
raise errors.RuntimeException(
"capture() function expects a callable as "
"its argument (i.e. capture(func, *args, **kwargs))"
)
context._push_buffer()
try:
callable_(*args, **kwargs)
finally:
buf = context._pop_buffer()
return buf.getvalue()
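# Hedged sketch: capturing a def's output as a string instead of writing it
# to the context's buffer.  ``some_def`` stands in for any context-accepting
# rendering callable; the arguments are arbitrary.
def _example_capture(context, some_def):
    html = capture(context, some_def, 'arg1', key='value')
    return html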
def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except TypeError:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render
def _decorate_inline(context, fn):
def decorate_render(render_fn):
dec = fn(render_fn)
def go(*args, **kw):
return dec(context, *args, **kw)
return go
return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
"""locate the template from the given uri and include it in
the current output."""
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(
context._clean_inheritance_tokens(),
template)
callable_(ctx, **_kwargs_for_include(callable_, context._data, **kwargs))
def _include_ui(context, ui, template_uri, *args, **kwargs):
uicls = _lookup_uicls(context, ui)
ui_module = uicls(context)
ui_module._execute(*args, **kwargs)
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['self']
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context._locals({'next': ih})
ih.inherits = TemplateNamespace("self:%s" % template.uri,
lclcontext,
template=template,
populate_self=False)
context._data['parent'] = lclcontext._data['local'] = ih.inherits
callable_ = getattr(template.module, '_choco_inherit', None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, '_choco_generate_namespaces', None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext)
def _lookup_uicls(context, ui):
lookup = context._with_template.lookup
if lookup is None:
raise errors.TemplateLookupException(
"Template '%s' has no TemplateLookup associated" %
context._with_template.uri)
uicls = lookup.get_ui(ui)
return uicls
def _lookup_template(context, uri, relativeto):
lookup = context._with_template.lookup
if lookup is None:
raise errors.TemplateLookupException(
"Template '%s' has no TemplateLookup associated" %
context._with_template.uri)
uri = lookup.adjust_uri(uri, relativeto)
try:
return lookup.get_template(uri)
except errors.TopLevelLookupException:
raise errors.TemplateLookupException(str(compat.exception_as()))
def _populate_self_namespace(context, template, self_ns=None):
if self_ns is None:
self_ns = TemplateNamespace('self:%s' % template.uri,
context, template=template,
populate_self=False)
context._data['self'] = context._data['local'] = self_ns
if hasattr(template.module, '_choco_inherit'):
ret = template.module._choco_inherit(template, context)
if ret:
return ret
return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
"""create a Context and return the string
output of the given template and template callable."""
if as_unicode:
buf = util.FastEncodingBuffer(as_unicode=True)
elif template.bytestring_passthrough:
buf = compat.StringIO()
else:
buf = util.FastEncodingBuffer(
as_unicode=as_unicode,
encoding=template.output_encoding,
errors=template.encoding_errors)
context = Context(buf, **data)
context._outputting_as_unicode = as_unicode
context._set_with_template(template)
_render_context(template, callable_, context, *args,
**_kwargs_for_callable(callable_, data))
return context._pop_buffer().getvalue()
def _render_ui(template, callable_, pctx, args, data):
context = Context(pctx._buffer_stack[-1], **data)
context._outputting_as_unicode = pctx._outputting_as_unicode
context._set_with_template(template)
_render_context(template, callable_, context)
def _kwargs_for_callable(callable_, data):
argspec = compat.inspect_func_args(callable_)
# for normal pages, **pageargs is usually present
if argspec[2]:
return data
# for rendering defs from the top level, figure out the args
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
kwargs = {}
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _kwargs_for_include(callable_, data, **kwargs):
argspec = compat.inspect_func_args(callable_)
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
import choco.template as template
# create polymorphic 'self' namespace for this
# template with possibly updated context
if not isinstance(tmpl, template.DefTemplate):
# if main render method, call from the base of the inheritance stack
(inherit, lclcontext) = _populate_self_namespace(context, tmpl)
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
else:
# otherwise, call the actual rendering method specified
(inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
_exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
"""execute a rendering callable given the callable, a
Context, and optional explicit arguments
the contextual Template will be located if it exists, and
the error handling options specified on that Template will
be interpreted here.
"""
template = context._with_template
if template is not None and \
(template.format_errors or template.error_handler):
try:
callable_(context, *args, **kwargs)
except Exception:
_render_error(template, context, compat.exception_as())
except:
e = sys.exc_info()[0]
_render_error(template, context, e)
else:
callable_(context, *args, **kwargs)
def _render_error(template, context, error):
if template.error_handler:
result = template.error_handler(context, error)
if not result:
compat.reraise(*sys.exc_info())
else:
error_template = errors.html_error_template()
if context._outputting_as_unicode:
context._buffer_stack[:] = [
util.FastEncodingBuffer(as_unicode=True)]
else:
context._buffer_stack[:] = [util.FastEncodingBuffer(
error_template.output_encoding,
error_template.encoding_errors)]
context._set_with_template(error_template)
error_template.render_context(context, error=error)
|
mit
| -24,096,702,235,384,970
| 29.50431
| 79
| 0.593966
| false
| 4.267109
| false
| false
| false
|
glogiotatidis/mozillians-new
|
mozillians/users/models.py
|
1
|
18988
|
import os
import uuid
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.db import models
from django.db.models import signals as dbsignals
from django.dispatch import receiver
from elasticutils.contrib.django import S, get_es
from elasticutils.contrib.django.models import SearchMixin
from funfactory.urlresolvers import reverse
from product_details import product_details
from sorl.thumbnail import ImageField, get_thumbnail
from tower import ugettext as _, ugettext_lazy as _lazy
from mozillians.common.helpers import gravatar
from mozillians.groups.models import (Group, GroupAlias,
Skill, SkillAlias,
Language, LanguageAlias)
from mozillians.users import (EMPLOYEES, MOZILLIANS, PUBLIC, PRIVACY_CHOICES,
DEFAULT_PRIVACY_FIELDS, PUBLIC_INDEXABLE_FIELDS)
from mozillians.users.managers import UserProfileManager
from mozillians.users.tasks import (update_basket_task, index_objects,
unindex_objects)
COUNTRIES = product_details.get_regions('en-US')
USERNAME_MAX_LENGTH = 30
AVATAR_SIZE = (300, 300)
def _calculate_photo_filename(instance, filename):
"""Generate a unique filename for uploaded photo."""
return os.path.join(settings.USER_AVATAR_DIR, str(uuid.uuid4()) + '.jpg')
class PrivacyField(models.PositiveSmallIntegerField):
def __init__(self, *args, **kwargs):
myargs = {'default': MOZILLIANS,
'choices': PRIVACY_CHOICES}
myargs.update(kwargs)
return super(PrivacyField, self).__init__(*args, **myargs)
class PrivacyAwareS(S):
def privacy_level(self, level=MOZILLIANS):
"""Set privacy level for query set."""
self._privacy_level = level
return self
def _clone(self, *args, **kwargs):
new = super(PrivacyAwareS, self)._clone(*args, **kwargs)
new._privacy_level = getattr(self, '_privacy_level', None)
return new
def __iter__(self):
self._iterator = super(PrivacyAwareS, self).__iter__()
def _generator():
while True:
obj = self._iterator.next()
obj._privacy_level = getattr(self, '_privacy_level', None)
yield obj
return _generator()
class UserProfilePrivacyModel(models.Model):
_privacy_fields = DEFAULT_PRIVACY_FIELDS
_privacy_level = None
privacy_photo = PrivacyField()
privacy_full_name = PrivacyField()
privacy_ircname = PrivacyField()
privacy_email = PrivacyField()
privacy_website = PrivacyField()
privacy_bio = PrivacyField()
privacy_city = PrivacyField()
privacy_region = PrivacyField()
privacy_country = PrivacyField()
privacy_groups = PrivacyField()
privacy_skills = PrivacyField()
privacy_languages = PrivacyField()
privacy_vouched_by = PrivacyField()
class Meta:
        abstract = True
class UserProfile(UserProfilePrivacyModel, SearchMixin):
objects = UserProfileManager()
user = models.OneToOneField(User)
full_name = models.CharField(max_length=255, default='', blank=False,
verbose_name=_lazy(u'Full Name'))
is_vouched = models.BooleanField(default=False)
last_updated = models.DateTimeField(auto_now=True, default=datetime.now)
website = models.URLField(max_length=200, verbose_name=_lazy(u'Website'),
default='', blank=True)
vouched_by = models.ForeignKey('UserProfile', null=True, default=None,
on_delete=models.SET_NULL, blank=True,
related_name='vouchees')
date_vouched = models.DateTimeField(null=True, blank=True, default=None)
groups = models.ManyToManyField(Group, blank=True, related_name='members')
skills = models.ManyToManyField(Skill, blank=True, related_name='members')
languages = models.ManyToManyField(Language, blank=True,
related_name='members')
bio = models.TextField(verbose_name=_lazy(u'Bio'), default='', blank=True)
photo = ImageField(default='', blank=True,
upload_to=_calculate_photo_filename)
ircname = models.CharField(max_length=63,
verbose_name=_lazy(u'IRC Nickname'),
default='', blank=True)
country = models.CharField(max_length=50, default='',
choices=COUNTRIES.items(),
verbose_name=_lazy(u'Country'))
region = models.CharField(max_length=255, default='', blank=True,
verbose_name=_lazy(u'Province/State'))
city = models.CharField(max_length=255, default='', blank=True,
verbose_name=_lazy(u'City'))
allows_community_sites = models.BooleanField(
default=True,
verbose_name=_lazy(u'Sites that can determine my vouched status'),
choices=((True, _lazy(u'All Community Sites')),
(False, _lazy(u'Only Mozilla Properties'))))
allows_mozilla_sites = models.BooleanField(
default=True,
verbose_name=_lazy(u'Allow Mozilla sites to access my profile data?'),
choices=((True, _lazy(u'Yes')), (False, _lazy(u'No'))))
basket_token = models.CharField(max_length=1024, default='', blank=True)
class Meta:
db_table = 'profile'
ordering = ['full_name']
def __getattribute__(self, attrname):
_getattr = (lambda x:
super(UserProfile, self).__getattribute__(x))
privacy_fields = _getattr('_privacy_fields')
privacy_level = _getattr('_privacy_level')
if privacy_level is not None and attrname in privacy_fields:
field_privacy = _getattr('privacy_%s' % attrname)
if field_privacy < privacy_level:
return privacy_fields.get(attrname)
return super(UserProfile, self).__getattribute__(attrname)
@classmethod
def extract_document(cls, obj_id, obj=None):
"""Method used by elasticutils."""
if obj is None:
obj = cls.objects.get(pk=obj_id)
d = {}
attrs = ('id', 'is_vouched', 'website', 'ircname',
'region', 'city', 'allows_mozilla_sites',
'allows_community_sites')
for a in attrs:
data = getattr(obj, a)
if isinstance(data, basestring):
data = data.lower()
d.update({a: data})
if obj.country:
d.update({'country':
[obj.country, COUNTRIES[obj.country].lower()]})
# user data
attrs = ('username', 'email', 'last_login', 'date_joined')
for a in attrs:
data = getattr(obj.user, a)
if isinstance(data, basestring):
data = data.lower()
d.update({a: data})
d.update(dict(fullname=obj.full_name.lower()))
d.update(dict(name=obj.full_name.lower()))
d.update(dict(bio=obj.bio))
d.update(dict(has_photo=bool(obj.photo)))
for attribute in ['groups', 'skills', 'languages']:
groups = []
for g in getattr(obj, attribute).all():
groups.extend(g.aliases.values_list('name', flat=True))
d[attribute] = groups
return d
@classmethod
def get_mapping(cls):
"""Returns an ElasticSearch mapping."""
return {
'properties': {
'id': {'type': 'integer'},
'name': {'type': 'string', 'index': 'not_analyzed'},
'fullname': {'type': 'string', 'analyzer': 'standard'},
'email': {'type': 'string', 'index': 'not_analyzed'},
'ircname': {'type': 'string', 'index': 'not_analyzed'},
'username': {'type': 'string', 'index': 'not_analyzed'},
'country': {'type': 'string', 'analyzer': 'whitespace'},
'region': {'type': 'string', 'analyzer': 'whitespace'},
'city': {'type': 'string', 'analyzer': 'whitespace'},
'skills': {'type': 'string', 'analyzer': 'whitespace'},
'groups': {'type': 'string', 'analyzer': 'whitespace'},
'languages': {'type': 'string', 'index': 'not_analyzed'},
'bio': {'type': 'string', 'analyzer': 'snowball'},
'is_vouched': {'type': 'boolean'},
'allows_mozilla_sites': {'type': 'boolean'},
'allows_community_sites': {'type': 'boolean'},
'photo': {'type': 'boolean'},
'website': {'type': 'string', 'index': 'not_analyzed'},
'last_updated': {'type': 'date'},
'date_joined': {'type': 'date'}}}
@classmethod
def search(cls, query, include_non_vouched=False, public=False):
"""Sensible default search for UserProfiles."""
query = query.lower().strip()
fields = ('username', 'bio__text', 'email', 'ircname',
'country__text', 'country__text_phrase',
'region__text', 'region__text_phrase',
'city__text', 'city__text_phrase',
'fullname__text', 'fullname__text_phrase',
                  'fullname__prefix', 'fullname__fuzzy',
'groups__text')
s = PrivacyAwareS(cls)
if public:
s = s.privacy_level(PUBLIC)
s = s.indexes(cls.get_index(public))
if query:
q = dict((field, query) for field in fields)
s = (s.boost(fullname__text_phrase=5, username=5, email=5,
ircname=5, fullname__text=4, country__text_phrase=4,
region__text_phrase=4, city__text_phrase=4,
fullname__prefix=3, fullname__fuzzy=2,
bio__text=2).query(or_=q))
s = s.order_by('_score', 'name')
if not include_non_vouched:
s = s.filter(is_vouched=True)
return s
@property
def email(self):
"""Privacy aware email property."""
if self._privacy_level and self.privacy_email < self._privacy_level:
return self._privacy_fields['email']
return self.user.email
@property
def display_name(self):
return self.full_name
@property
def privacy_level(self):
"""Return user privacy clearance."""
if self.groups.filter(name='staff').exists():
return EMPLOYEES
if self.is_vouched:
return MOZILLIANS
return PUBLIC
@property
def is_complete(self):
"""Tests if a user has all the information needed to move on
past the original registration view.
"""
return self.display_name.strip() != ''
@property
def is_public(self):
"""Return True is any of the privacy protected fields is PUBLIC."""
for field in self._privacy_fields:
if getattr(self, 'privacy_%s' % field, None) == PUBLIC:
return True
return False
@property
def is_public_indexable(self):
"""For profile to be public indexable should have at least
full_name OR ircname OR email set to PUBLIC.
"""
for field in PUBLIC_INDEXABLE_FIELDS:
if (getattr(self, 'privacy_%s' % field, None) == PUBLIC and
getattr(self, field, None)):
return True
return False
def __unicode__(self):
"""Return this user's name when their profile is called."""
return self.display_name
def get_absolute_url(self):
return reverse('profile', args=[self.user.username])
def anonymize(self):
"""Remove personal info from a user"""
for name in ['first_name', 'last_name', 'email']:
setattr(self.user, name, '')
self.full_name = ''
# Give a random username
self.user.username = uuid.uuid4().hex[:30]
self.user.is_active = False
self.user.save()
for f in self._meta.fields:
if not f.editable or f.name in ['id', 'user']:
continue
if f.default == models.fields.NOT_PROVIDED:
raise Exception('No default value for %s' % f.name)
setattr(self, f.name, f.default)
for f in self._meta.many_to_many:
getattr(self, f.name).clear()
self.save()
def set_instance_privacy_level(self, level):
"""Sets privacy level of instance."""
self._privacy_level = level
def set_privacy_level(self, level, save=True):
"""Sets all privacy enabled fields to 'level'."""
for field in self._privacy_fields:
setattr(self, 'privacy_%s' % field, level)
if save:
self.save()
def set_membership(self, model, membership_list):
"""Alters membership to Groups, Skills and Languages."""
if model is Group:
m2mfield = self.groups
alias_model = GroupAlias
elif model is Skill:
m2mfield = self.skills
alias_model = SkillAlias
elif model is Language:
m2mfield = self.languages
alias_model = LanguageAlias
# Remove any non-system groups that weren't supplied in this list.
m2mfield.remove(*[g for g in m2mfield.all()
if g.name not in membership_list
and not getattr(g, 'system', False)])
# Add/create the rest of the groups
groups_to_add = []
for g in membership_list:
if alias_model.objects.filter(name=g).exists():
group = alias_model.objects.get(name=g).alias
else:
group = model.objects.create(name=g)
if not getattr(g, 'system', False):
groups_to_add.append(group)
m2mfield.add(*groups_to_add)
def get_photo_thumbnail(self, geometry='160x160', **kwargs):
if 'crop' not in kwargs:
kwargs['crop'] = 'center'
if self.photo:
return get_thumbnail(self.photo, geometry, **kwargs)
return get_thumbnail(settings.DEFAULT_AVATAR_PATH, geometry, **kwargs)
def get_photo_url(self, geometry='160x160', **kwargs):
"""Return photo url.
If privacy allows and no photo set, return gravatar link.
If privacy allows and photo set return local photo link.
If privacy doesn't allow return default local link.
"""
if not self.photo and self.privacy_photo >= self._privacy_level:
return gravatar(self.user.email, size=geometry)
return self.get_photo_thumbnail(geometry, **kwargs).url
def vouch(self, vouched_by, commit=True):
if self.is_vouched:
return
self.is_vouched = True
self.vouched_by = vouched_by
self.date_vouched = datetime.now()
if commit:
self.save()
self._email_now_vouched()
def auto_vouch(self):
"""Auto vouch mozilla.com users."""
email = self.user.email
if any(email.endswith('@' + x) for x in settings.AUTO_VOUCH_DOMAINS):
self.vouch(None, commit=False)
def add_to_staff_group(self):
"""Keep users in the staff group if they're autovouchable."""
email = self.user.email
staff, created = Group.objects.get_or_create(name='staff', system=True)
if any(email.endswith('@' + x) for x in
settings.AUTO_VOUCH_DOMAINS):
self.groups.add(staff)
elif staff in self.groups.all():
self.groups.remove(staff)
def _email_now_vouched(self):
"""Email this user, letting them know they are now vouched."""
subject = _(u'You are now vouched on Mozillians!')
message = _(u"You've now been vouched on Mozillians.org. "
"You'll now be able to search, vouch "
"and invite other Mozillians onto the site.")
send_mail(subject, message, settings.FROM_NOREPLY,
[self.user.email])
def save(self, *args, **kwargs):
self._privacy_level = None
self.auto_vouch()
super(UserProfile, self).save(*args, **kwargs)
self.add_to_staff_group()
@classmethod
def get_index(cls, public_index=False):
if public_index:
return settings.ES_INDEXES['public']
return settings.ES_INDEXES['default']
@classmethod
def index(cls, document, id_=None, bulk=False, force_insert=False,
es=None, public_index=False):
""" Overide elasticutils.index() to support more than one index
for UserProfile model.
"""
if bulk and es is None:
raise ValueError('bulk is True, but es is None')
if es is None:
es = get_es()
es.index(document, index=cls.get_index(public_index),
doc_type=cls.get_mapping_type(),
id=id_, bulk=bulk, force_insert=force_insert)
@classmethod
def unindex(cls, id, es=None, public_index=False):
if es is None:
es = get_es()
es.delete(cls.get_index(public_index), cls.get_mapping_type(), id)
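# Hedged sketch (illustrative only, not part of the original module):
# privacy-aware access and the default search entry point.  ``profile``
# stands in for any saved UserProfile instance.
def _example_privacy_and_search(profile):
    profile.set_instance_privacy_level(PUBLIC)
    # fields whose privacy setting is stricter than PUBLIC now return the
    # default value from _privacy_fields instead of the stored data
    masked_name = profile.full_name
    results = UserProfile.search('mozilla', include_non_vouched=True)
    return masked_name, results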
@receiver(dbsignals.post_save, sender=User,
dispatch_uid='create_user_profile_sig')
def create_user_profile(sender, instance, created, raw, **kwargs):
if not raw:
up, created = UserProfile.objects.get_or_create(user=instance)
if not created:
dbsignals.post_save.send(sender=UserProfile, instance=up,
created=created, raw=raw)
@receiver(dbsignals.post_save, sender=UserProfile,
dispatch_uid='update_basket_sig')
def update_basket(sender, instance, **kwargs):
update_basket_task.delay(instance.id)
@receiver(dbsignals.post_save, sender=UserProfile,
dispatch_uid='update_search_index_sig')
def update_search_index(sender, instance, **kwargs):
if instance.is_complete:
index_objects.delay(sender, [instance.id], public=False)
if instance.is_public_indexable:
index_objects.delay(sender, [instance.id], public_index=True)
else:
unindex_objects(UserProfile, [instance.id], public_index=True)
@receiver(dbsignals.post_delete, sender=UserProfile,
dispatch_uid='remove_from_search_index_sig')
def remove_from_search_index(sender, instance, **kwargs):
unindex_objects(UserProfile, [instance.id], public_index=False)
unindex_objects(UserProfile, [instance.id], public_index=True)
class UsernameBlacklist(models.Model):
value = models.CharField(max_length=30, unique=True)
is_regex = models.BooleanField(default=False)
def __unicode__(self):
return self.value
class Meta:
ordering = ['value']
|
bsd-3-clause
| -7,662,934,216,234,882,000
| 36.6
| 79
| 0.583421
| false
| 3.972385
| false
| false
| false
|
tommilligan/isoprene-pumpjack
|
isoprene_pumpjack/utils/neo_to_d3.py
|
1
|
2463
|
#!/usr/bin/env python
'''
Transformation of Neo4J result object into a d3 friendly dictionary.
'''
def dedupe_dict_list(duped, id_prop="id"):
'''Dedupe a list of dicts by a dictionary property'''
deduped = list({v[id_prop]:v for v in duped}.values())
return deduped
def neo_node_to_d3_node(node):
d3node = {
"id": node.id,
"labels": [label for label in node.labels],
"props": {k: v for k, v in node.items()}
}
return d3node
def neo_link_to_d3_link(link):
d3link = {
"id": link.id,
"source": link.start,
"target": link.end,
"labels": [link.type],
"props": {k: v for k, v in link.items()}
}
return d3link
def neo_to_d3(result, nodeLabels=[], linkLabels=[]):
'''
Convert neo results to d3 drawable nodes/links object
Takes
* the neo result (BoltStatementResult)
* a list of node labels (string[])
* a list of link labels (string[])
Dedupes to the standard format:
{
nodes: [
{
id: string,
labels: string[],
                props: {}
}
],
links: [
{
id: string,
source: string, # id of a node
target: string, # id of a node
labels: string[],
                props: {}
}
]
}
'''
d3data = {
"nodes": [],
"links": []
}
process_neo_objects = [
{
"labels": nodeLabels,
"function": neo_node_to_d3_node,
"d3key": "nodes"
},
{
"labels": linkLabels,
"function": neo_link_to_d3_link,
"d3key": "links"
}
]
for record in result:
for process in process_neo_objects:
for label in process["labels"]:
neo_objects = record[label]
if isinstance(neo_objects, list):
for neo_object in neo_objects:
d3object = process["function"](neo_object)
d3data[process["d3key"]].append(d3object)
else:
neo_object = neo_objects
d3object = process["function"](neo_object)
d3data[process["d3key"]].append(d3object)
d3data[process["d3key"]] = dedupe_dict_list(d3data[process["d3key"]], "id")
return d3data
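# A minimal usage sketch, not part of the original module: the sample dicts
# below are assumptions used only to illustrate dedupe_dict_list.
if __name__ == "__main__":
    duped = [{"id": 1, "labels": ["Person"]},
             {"id": 1, "labels": ["Person"]},
             {"id": 2, "labels": ["Movie"]}]
    deduped = dedupe_dict_list(duped)
    # one entry remains per unique "id"
    assert sorted(d["id"] for d in deduped) == [1, 2]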
|
apache-2.0
| -6,120,799,362,977,779,000
| 25.483871
| 87
| 0.484369
| false
| 3.687126
| false
| false
| false
|
tomato42/tlsfuzzer
|
scripts/test-zero-length-data.py
|
1
|
6794
|
# Author: Hubert Kario, (c) 2015
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Example empty appd data test"""
from __future__ import print_function
import traceback
from random import sample
import sys
import re
import getopt
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, \
AlertGenerator
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectApplicationData
from tlslite.constants import CipherSuite, AlertLevel, AlertDescription
from tlsfuzzer.utils.lists import natural_sort_keys
version = 2
def help_msg():
print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
print(" -h hostname name of the host to run the test against")
print(" localhost by default")
print(" -p port port number to use for connection, 4433 by default")
print(" -e probe-name exclude the probe from the list of the ones run")
print(" may be specified multiple times")
print(" -n num run 'num' or all(if 0) tests instead of default(all)")
print(" (excluding \"sanity\" tests)")
print(" -x probe-name expect the probe to fail. When such probe passes despite being marked like this")
print(" it will be reported in the test summary and the whole script will fail.")
print(" May be specified multiple times.")
print(" -X message expect the `message` substring in exception raised during")
print(" execution of preceding expected failure probe")
print(" usage: [-x probe-name] [-X exception], order is compulsory!")
print(" --help this message")
def main():
"""check if app data records with zero payload are accepted by server"""
conversations = {}
host = "localhost"
port = 4433
num_limit = None
run_exclude = set()
expected_failures = {}
last_exp_tmp = None
argv = sys.argv[1:]
opts, argv = getopt.getopt(argv, "h:p:e:n:x:X:", ["help"])
for opt, arg in opts:
if opt == '-h':
host = arg
elif opt == '-p':
port = int(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '-n':
num_limit = int(arg)
elif opt == '-x':
expected_failures[arg] = None
last_exp_tmp = str(arg)
elif opt == '-X':
if not last_exp_tmp:
raise ValueError("-x has to be specified before -X")
expected_failures[last_exp_tmp] = str(arg)
else:
raise ValueError("Unknown option: {0}".format(opt))
if argv:
help_msg()
raise ValueError("Unknown options: {0}".format(argv))
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
node = node.add_child(ClientHelloGenerator(ciphers))
node = node.add_child(ExpectServerHello())
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(bytearray(0)))
text = b"GET / HTTP/1.0\nX-bad: aaaa\n\n"
node = node.add_child(ApplicationDataGenerator(text))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert(AlertLevel.warning,
AlertDescription.close_notify))
node.next_sibling = ExpectClose()
node = node.add_child(ExpectClose())
conversations["zero-length app data"] = \
conversation
# run the conversation
good = 0
bad = 0
xfail = 0
xpass = 0
failed = []
xpassed = []
if not num_limit:
num_limit = len(conversations)
sampled_tests = sample(list(conversations.items()), len(conversations))
for c_name, conversation in sampled_tests:
if c_name in run_exclude:
continue
print("{0} ...".format(c_name))
runner = Runner(conversation)
res = True
exception = None
#because we don't want to abort the testing and we are reporting
#the errors to the user, using a bare except is OK
#pylint: disable=bare-except
try:
runner.run()
except Exception as exp:
exception = exp
print("Error while processing")
print(traceback.format_exc())
res = False
#pylint: enable=bare-except
if c_name in expected_failures:
if res:
xpass += 1
xpassed.append(c_name)
print("XPASS-expected failure but test passed\n")
else:
if expected_failures[c_name] is not None and \
expected_failures[c_name] not in str(exception):
bad += 1
failed.append(c_name)
print("Expected error message: {0}\n"
.format(expected_failures[c_name]))
else:
xfail += 1
print("OK-expected failure\n")
        else:
            if res:
                good += 1
                print("OK")
            else:
                bad += 1
print("Test end")
print(20 * '=')
print("version: {0}".format(version))
print(20 * '=')
print("TOTAL: {0}".format(len(sampled_tests)))
print("SKIP: {0}".format(len(run_exclude.intersection(conversations.keys()))))
print("PASS: {0}".format(good))
print("XFAIL: {0}".format(xfail))
print("FAIL: {0}".format(bad))
print("XPASS: {0}".format(xpass))
print(20 * '=')
    sort = sorted(xpassed, key=natural_sort_keys)
if len(sort):
print("XPASSED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
sort = sorted(failed, key=natural_sort_keys)
if len(sort):
print("FAILED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
if bad > 0:
sys.exit(1)
if __name__ == "__main__":
main()
|
gpl-2.0
| 8,951,013,126,996,592,000
| 35.724324
| 108
| 0.583898
| false
| 3.968458
| true
| false
| false
|
mintchaos/django_esv
|
esv/__init__.py
|
1
|
1497
|
import urllib
import httplib2
from django.conf import settings
class EsvClientError(Exception):
pass
class PassageNotFoundError(EsvClientError):
pass
class EsvQuotaExceededError(EsvClientError):
pass
class EsvClient(object):
def __init__(self, key='IP'):
http_cache = getattr(settings, 'ESV_HTTP_CACHE', '/tmp/esv_http_cache')
self.http = httplib2.Http(http_cache)
self.key = key
self._cache = {}
def get_passage(self, passage, headings=False, audio=True, footnotes=False, audio_format="flash"):
params_dict = {
'passage': passage,
'include-headings': headings,
            'include-footnotes': footnotes,  # hyphenated to match the other ESV query params
'include-word-ids': False,
'include-first-verse-numbers': False,
'include-audio-link': audio,
'audio-format': audio_format,
}
params = urllib.urlencode(params_dict).lower()
# TODO: Check cache here
resp, content = self.http.request("http://www.esvapi.org/v2/rest/passageQuery?key=%s&%s" % (self.key, params), "GET")
if content.startswith("ERROR"):
if content.lower().find('no results found') > 0:
raise PassageNotFoundError
if content.lower().find('you have exceeded your quota') > 0:
raise EsvQuotaExceededError
raise EsvClientError
# TODO: Set cache here
return content
# main instance of the esv client
esv = EsvClient()
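# A minimal usage sketch (commented out because it performs a network request;
# the passage reference is illustrative and a valid ESV API key/quota is
# assumed):
#   from esv import esv, PassageNotFoundError
#   try:
#       html = esv.get_passage("John 3:16", headings=True, audio=False)
#   except PassageNotFoundError:
#       html = None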
|
bsd-3-clause
| 5,610,947,664,951,156,000
| 31.565217
| 125
| 0.613894
| false
| 3.878238
| false
| false
| false
|
MJuddBooth/pandas
|
pandas/core/reshape/reshape.py
|
1
|
36628
|
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from functools import partial
import itertools
import numpy as np
from pandas._libs import algos as _algos, reshape as _reshape
from pandas._libs.sparse import IntIndex
from pandas.compat import PY2, range, text_type, u, zip
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int, is_bool_dtype, is_extension_array_dtype,
is_integer_dtype, is_list_like, is_object_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import notna
from pandas import compat
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.index import Index, MultiIndex
from pandas.core.internals.arrays import extract_array
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index, decons_obs_group_ids, get_compressed_ids,
get_group_index)
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
values : ndarray
Values of DataFrame to "Unstack"
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
value_columns : Index, optional
Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
        datetimelike, etc. For integer types, by default data will be converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame or SparseDataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None, constructor=None):
if values.ndim == 1:
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
constructor = DataFrame
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
        # If the data frame is too big, the number of unique index combinations
        # will cause an int32 overflow on Windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level
in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError('Unstacked DataFrame is too big, '
'causing int32 overflow')
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1:] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
values, _ = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, lev._na_value)
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level_full,)
new_names = self.value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator)
for lab in self.value_columns.codes]
else:
new_levels = [self.value_columns, self.removed_level_full]
new_names = [self.value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_codes = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_codes[0]
if (lab == -1).any():
lev = lev.insert(len(lev), lev._na_value)
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, codes=result_codes,
names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
ccodes = [index.codes[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rcodes = [index.codes[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes,
xnull=False)
if rlocs == []:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name='__placeholder__')
else:
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
codes=rcodes + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_codes = recons_codes
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_codes = [unstcols.codes[0]]
for rec in recons_codes:
new_codes.append(rec.take(unstcols.codes[-1]))
new_columns = MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
if is_extension_array_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value,
constructor=obj._constructor_expanddim)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = partial(_Unstacker, index=obj.index,
level=level, fill_value=fill_value)
blocks = obj._data.unstack(unstacker,
fill_value=fill_value)
return obj._constructor(blocks)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value,
constructor=obj._constructor)
return unstacker.get_result()
def _unstack_extension_series(series, level, fill_value):
"""
Unstack an ExtensionArray-backed Series.
The ExtensionDtype is preserved.
Parameters
----------
series : Series
A Series with an ExtensionArray for values
level : Any
The level name or number.
fill_value : Any
The user-level (not physical storage) fill value to use for
missing values introduced by the reshape. Passed to
``series.values.take``.
Returns
-------
DataFrame
Each column of the DataFrame will have the same dtype as
the input Series.
"""
# Implementation note: the basic idea is to
# 1. Do a regular unstack on a dummy array of integers
# 2. Followup with a columnwise take.
# We use the dummy take to discover newly-created missing values
# introduced by the reshape.
from pandas.core.reshape.concat import concat
dummy_arr = np.arange(len(series))
# fill_value=-1, since we will do a series.values.take later
result = _Unstacker(dummy_arr, series.index,
level=level, fill_value=-1).get_result()
out = []
values = extract_array(series, extract_numpy=False)
for col, indices in result.iteritems():
out.append(Series(values.take(indices.values,
allow_fill=True,
fill_value=fill_value),
name=col, index=result.index))
return concat(out, axis='columns', copy=False, keys=result.columns)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
if frame._is_homogeneous_type:
# For homogeneous EAs, frame.values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes.values)
dtype = dtypes[0]
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type([
col._values for _, col in frame.iteritems()
])
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame.values.ravel()
else:
# non-homogeneous
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
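# A small illustrative example of ``stack`` (the frame below is chosen here,
# not taken from the original docstring):
#   >>> df = DataFrame([[1, 2], [3, 4]], index=['r0', 'r1'], columns=['a', 'b'])
#   >>> stack(df)
#   r0  a    1
#       b    2
#   r1  a    3
#       b    4
#   dtype: int64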
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
        when columns do not have names, in which case we must leave it as a
        level number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(level_codes) for lev, level_codes
in zip(this.columns.levels[:-1],
this.columns.codes[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_codes = sorted(set(this.columns.codes[-1]))
level_vals_used = level_vals[level_codes]
levsize = len(level_codes)
drop_cols = []
for key in unique_groups:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.codes[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if (frame._is_homogeneous_type and
is_extension_array_dtype(frame.dtypes.iloc[0])):
dtype = this[this.columns[loc]].dtypes.iloc[0]
subset = this[this.columns[loc]]
value_slice = dtype.construct_array_type()._concat_same_type(
[x._values for _, x in subset.iteritems()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
value_slice = value_slice.take(idx)
elif frame._is_mixed_type:
value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()
new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
new_levels = [this.index]
new_codes = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_codes.append(np.tile(level_codes, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False, dtype=None):
"""
Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
        Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
Dummy-coded data.
See Also
--------
Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
dtypes_to_encode = ['object', 'category']
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(
include=dtypes_to_encode)
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
len_msg = ("Length of '{name}' ({len_item}) did not match the "
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = len_msg.format(name=name, len_item=len(item),
len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first, dtype=dtype)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False, dtype=None):
from pandas.core.reshape.concat import concat
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_frame(data):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
return DataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_frame(data)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_frame(data)
number_of_cols = len(levels)
if prefix is None:
dummy_cols = levels
else:
# PY2 embedded unicode, gh-22084
def _make_col_name(prefix, prefix_sep, level):
fstr = '{prefix}{prefix_sep}{level}'
if PY2 and (isinstance(prefix, text_type) or
isinstance(prefix_sep, text_type) or
isinstance(level, text_type)):
fstr = u(fstr)
return fstr.format(prefix=prefix,
prefix_sep=prefix_sep,
level=level)
dummy_cols = [_make_col_name(prefix, prefix_sep, level)
for level in levels]
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
sparse_series = []
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
mask = codes != -1
codes = codes[mask]
n_idx = np.arange(N)[mask]
for ndx, code in zip(n_idx, codes):
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs),
fill_value=fill_value,
dtype=dtype)
sparse_series.append(Series(data=sarr, index=index, name=col))
out = concat(sparse_series, axis=1, copy=False)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
codes = frame.index.codes[num]
if transform is not None:
mapped_items = items.map(transform)
codes, items = _factorize_from_iterable(mapped_items.take(codes))
values = np.eye(len(items), dtype=float)
values = values.take(codes, axis=0)
return DataFrame(values, columns=items, index=frame.index)
def _reorder_for_extension_array_stack(arr, n_rows, n_columns):
"""
Re-orders the values when stacking multiple extension-arrays.
The indirect stacking method used for EAs requires a followup
take to get the order correct.
Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.
Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately
Examples
--------
>>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
return arr.take(idx)
|
bsd-3-clause
| 777,959,914,915,552,400
| 34.017208
| 79
| 0.577209
| false
| 3.857609
| false
| false
| false
|
BetterWorks/django-anonymizer
|
anonymizer/management/commands/check_anonymizers.py
|
1
|
1139
|
from django.core.management import CommandError
from django.core.management.base import AppCommand
from anonymizer.utils import get_anonymizers
try:
unicode
except NameError:
unicode = str # python 3
class Command(AppCommand):
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
help='One or more app names.')
def handle_app_config(self, app_config, **options):
anonymizers = get_anonymizers(app_config)
models = set()
errors = []
for klass in anonymizers:
models.add(klass.model)
instance = klass()
try:
instance.validate()
except ValueError as e:
errors.append(unicode(e))
for model in app_config.get_models():
if model._meta.abstract or model._meta.proxy:
continue
if model not in models:
errors.append(u'need anonymizer for %s' % model)
if errors:
raise CommandError('%d errors\n%s' % (len(errors), '\n'.join(errors)))
return 0
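# Example invocation (the app label is illustrative):
#   python manage.py check_anonymizers myapp
# The command raises CommandError listing models that still lack an anonymizer
# or whose anonymizers fail validate(); it returns 0 when everything checks out.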
|
mit
| 3,961,398,319,339,451,400
| 28.205128
| 82
| 0.579456
| false
| 4.25
| false
| false
| false
|
ZombieAlex/MFCAuto
|
src/main/genConstants.py
|
1
|
3164
|
import re
from urllib.request import urlopen
import json
serverConfig = "https://www.myfreecams.com/_js/serverconfig.js"
url = "https://www.myfreecams.com/_js/mfccore.js"
# Maybe it's wrong to merge in the w. stuff? Is that all just for the UI?
constantRe = re.compile(r'(\s|;?|,)(FCS|w)\.([A-Z0-9]+)_([A-Z0-9_]+)\s+?=\s+?([0-9]+);')
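# For reference, an input line such as " FCS.FCTYPE_LOGIN = 1;" (illustrative,
# not quoted from mfccore.js) yields the match groups
# (' ', 'FCS', 'FCTYPE', 'LOGIN', '1'), which the loop below stores as
# constantMap['FCTYPE']['LOGIN'] = '1'.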
constantMap = dict()
header = """// Various constants and enums used by MFC. Most of these values can be seen here:
// http://www.myfreecams.com/_js/mfccore.js
export const MAGIC = -2027771214;
export const FLASH_PORT = 8100;
export const WEBSOCKET_PORT = 8080;
// STATE is essentially the same as FCVIDEO but has friendly names
// for better log messages and code readability
export enum STATE {
FreeChat = 0, // TX_IDLE
// TX_RESET = 1, // Unused?
Away = 2, // TX_AWAY
// TX_CONFIRMING = 11, // Unused?
Private = 12, // TX_PVT
GroupShow = 13, // TX_GRP
// TX_RESERVED = 14, // Unused?
// TX_KILLMODEL = 15, // Unused?
// C2C_ON = 20, // Unused?
// C2C_OFF = 21, // Unused?
Online = 90, // RX_IDLE
// RX_PVT = 91, // Unused?
// RX_VOY = 92, // Unused?
// RX_GRP = 93, // Unused?
// NULL = 126, // Unused?
Offline = 127, // OFFLINE
}
// Version number to pass along with our
// FCTYPE_LOGIN login requests
//
// The latest Flash version number is here:
// https://www.myfreecams.com/js/wsgw.js
// The latest WebSocket version number is here:
// http://m.myfreecams.com/source.min.js
export enum LOGIN_VERSION {
FLASH = 20071025,
WEBSOCKET = 20080910,
}
"""
#Add our own constants...
constantMap.setdefault("FCTYPE", dict())["CLIENT_MANUAL_DISCONNECT"] = -6
constantMap.setdefault("FCTYPE", dict())["CLIENT_DISCONNECTED"] = -5
constantMap.setdefault("FCTYPE", dict())["CLIENT_MODELSLOADED"] = -4
constantMap.setdefault("FCTYPE", dict())["CLIENT_CONNECTED"] = -3
constantMap.setdefault("FCTYPE", dict())["ANY"] = -2
constantMap.setdefault("FCTYPE", dict())["UNKNOWN"] = -1
with urlopen(url) as data:
scriptText = data.read().decode('utf-8')
result = constantRe.findall(scriptText)
for (prefix1, prefix2, fctype, subtype, num) in result:
constantMap.setdefault(fctype, dict())[subtype] = num
with open("Constants.ts", "w") as f:
f.write(header)
for fctype in sorted(constantMap):
f.write("\nexport enum {} {{\n".format(fctype))
for subtype, value in sorted(constantMap[fctype].items(), key=lambda x: int(x[1])):
f.write(' "{}" = {},\n'.format(subtype, value))
f.write("}\n")
with urlopen(serverConfig) as configData:
configText = configData.read().decode('utf-8')
config = json.loads(configText)
configText = json.dumps(config, indent=4, sort_keys=True)
f.write("\n// tslint:disable:trailing-comma\n")
f.write("export const CACHED_SERVERCONFIG = {}".format(configText))
f.write(";\n// tslint:enable:trailing-comma\n")
print("Done")
|
mit
| 4,662,721,920,012,275,000
| 38.061728
| 95
| 0.599874
| false
| 3.173521
| true
| false
| false
|
rdnetto/Kv-Creator
|
MainWindow.py
|
1
|
2302
|
import kivy.app
import kivy.lang
import traceback
from threading import Thread
from PySide.QtGui import *
from Queue import Queue
from creator_ui import Ui_MainWindow
from kvparser import *
def ErrorHandler(func):
'''Function decorator for displaying exceptions'''
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
traceback.print_exc()
QMessageBox.critical(None, "Error", traceback.format_exc())
QApplication.exit(1)
return wrapper
class MainWindow(Ui_MainWindow, QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setupUi(self)
self.demoThread = None
self.actionOpen.triggered.connect(self.openFile)
self.actionSave.triggered.connect(self.saveFile)
@ErrorHandler
def openFile(self):
if(self.demoThread is not None and self.demoThread.is_alive()):
raise Exception("File already open")
# graphically load file in kivy thread
rootQueue = Queue()
path = "test.kv"
self.demoThread = Thread(name="kivy", target=demo, args=[path, rootQueue])
self.demoThread.daemon = True
self.demoThread.start()
self.rootWidget = rootQueue.get()
# load source and correspond to graphical objects
self.kvfile = KvFile(path)
if(self.rootWidget is None):
raise Exception("Failed to load file")
else:
self.kvfile.rootRule.populate(self.rootWidget)
print("Parsed and corresponded kv file:")
print("\n".join(map(str, self.kvfile.elements)))
@ErrorHandler
def saveFile(self):
if(self.kvfile is None):
raise Exception("No file open")
self.kvfile.save()
def demo(path, rootQueue):
'''Event loop for demo application
path: the .kv file to load
rootQueue: a Queue that the root widget should be pushed onto (or None if creation fails)
'''
def _build():
try:
root = kivy.lang.Builder.load_file(path)
rootQueue.put(root)
return root
except:
rootQueue.put(None)
raise
app = kivy.app.App()
app.build = _build
app.run()
|
gpl-2.0
| 8,181,468,927,837,841,000
| 24.577778
| 93
| 0.619461
| false
| 4.132855
| false
| false
| false
|
alkor/python-opcua
|
examples/minimal-server-with-encryption.py
|
1
|
1176
|
import sys
sys.path.insert(0, "..")
import time
from opcua import ua, Server
if __name__ == "__main__":
# setup our server
server = Server()
server.set_endpoint("opc.tcp://0.0.0.0:4841/freeopcua/server/")
# load server certificate and private key. This enables endpoints
# with signing and encryption.
server.load_certificate("example-certificate.der")
server.load_private_key("example-private-key.pem")
    # setup our own namespace, not really necessary but recommended by the spec
uri = "http://examples.freeopcua.github.io"
idx = server.register_namespace(uri)
# get Objects node, this is where we should put our custom stuff
objects = server.get_objects_node()
# populating our address space
myobj = objects.add_object(idx, "MyObject")
myvar = myobj.add_variable(idx, "MyVariable", 6.7)
myvar.set_writable() # Set MyVariable to be writable by clients
# starting!
server.start()
try:
count = 0
while True:
time.sleep(1)
count += 0.1
myvar.set_value(count)
finally:
        #close connection, remove subscriptions, etc
server.stop()
|
lgpl-3.0
| -747,732,777,772,497,200
| 27
| 70
| 0.644558
| false
| 3.675
| false
| false
| false
|
rphlo/django-seuranta
|
seuranta/app/views.py
|
1
|
2277
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, get_object_or_404
from django.utils.timezone import now
from seuranta.models import Competition
@login_required
def own_competitions(request):
user = request.user
comps = Competition.objects.filter(publisher=user)
return render(request,
'seuranta/own_competitions.html',
{'competitions': comps})
@login_required
def create_competition(request):
return render(request,
'seuranta/create_competition.html')
@login_required
def edit_competition(request, competition_id):
competition = get_object_or_404(Competition, id=competition_id)
if competition.publisher != request.user:
raise PermissionDenied
return render(request,
'seuranta/edit_competition.html',
{'competition': competition})
@login_required
def edit_map(request, competition_id):
competition = get_object_or_404(Competition, id=competition_id)
if competition.publisher != request.user:
raise PermissionDenied
return render(request,
'seuranta/edit_map.html',
{'competition': competition})
@login_required
def edit_competitors(request, competition_id):
competition = get_object_or_404(Competition, id=competition_id)
if competition.publisher != request.user:
raise PermissionDenied
return render(request,
'seuranta/edit_competitors.html',
{'competition': competition})
def list_competitions(request):
ts = now()
qs = Competition.objects.all()
live = qs.filter(
start_date__lte=ts,
end_date__gte=ts,
publication_policy="public"
).order_by('start_date')
upcoming = qs.filter(
start_date__gt=ts,
end_date__gt=ts,
publication_policy="public"
).order_by('start_date')
past = qs.filter(
start_date__lt=ts,
end_date__lt=ts,
publication_policy="public"
).order_by('-end_date')
return render(request,
'seuranta/list_competitions.html',
{'live': live, 'upcoming': upcoming, 'past': past})
|
mit
| -2,442,262,479,194,042,000
| 29.77027
| 69
| 0.646465
| false
| 3.602848
| false
| false
| false
|
prometheus/client_python
|
prometheus_client/values.py
|
1
|
4369
|
from __future__ import unicode_literals
import os
from threading import Lock
import warnings
from .mmap_dict import mmap_key, MmapedDict
class MutexValue(object):
"""A float protected by a mutex."""
_multiprocess = False
def __init__(self, typ, metric_name, name, labelnames, labelvalues, **kwargs):
self._value = 0.0
self._lock = Lock()
def inc(self, amount):
with self._lock:
self._value += amount
def set(self, value):
with self._lock:
self._value = value
def get(self):
with self._lock:
return self._value
def MultiProcessValue(process_identifier=os.getpid):
"""Returns a MmapedValue class based on a process_identifier function.
The 'process_identifier' function MUST comply with this simple rule:
when called in simultaneously running processes it MUST return distinct values.
Using a different function than the default 'os.getpid' is at your own risk.
"""
files = {}
values = []
pid = {'value': process_identifier()}
# Use a single global lock when in multi-processing mode
# as we presume this means there is no threading going on.
# This avoids the need to also have mutexes in __MmapDict.
lock = Lock()
class MmapedValue(object):
"""A float protected by a mutex backed by a per-process mmaped file."""
_multiprocess = True
def __init__(self, typ, metric_name, name, labelnames, labelvalues, multiprocess_mode='', **kwargs):
self._params = typ, metric_name, name, labelnames, labelvalues, multiprocess_mode
# This deprecation warning can go away in a few releases when removing the compatibility
if 'prometheus_multiproc_dir' in os.environ and 'PROMETHEUS_MULTIPROC_DIR' not in os.environ:
os.environ['PROMETHEUS_MULTIPROC_DIR'] = os.environ['prometheus_multiproc_dir']
warnings.warn("prometheus_multiproc_dir variable has been deprecated in favor of the upper case naming PROMETHEUS_MULTIPROC_DIR", DeprecationWarning)
with lock:
self.__check_for_pid_change()
self.__reset()
values.append(self)
def __reset(self):
typ, metric_name, name, labelnames, labelvalues, multiprocess_mode = self._params
if typ == 'gauge':
file_prefix = typ + '_' + multiprocess_mode
else:
file_prefix = typ
if file_prefix not in files:
filename = os.path.join(
os.environ.get('PROMETHEUS_MULTIPROC_DIR'),
'{0}_{1}.db'.format(file_prefix, pid['value']))
files[file_prefix] = MmapedDict(filename)
self._file = files[file_prefix]
self._key = mmap_key(metric_name, name, labelnames, labelvalues)
self._value = self._file.read_value(self._key)
def __check_for_pid_change(self):
actual_pid = process_identifier()
if pid['value'] != actual_pid:
pid['value'] = actual_pid
# There has been a fork(), reset all the values.
for f in files.values():
f.close()
files.clear()
for value in values:
value.__reset()
def inc(self, amount):
with lock:
self.__check_for_pid_change()
self._value += amount
self._file.write_value(self._key, self._value)
def set(self, value):
with lock:
self.__check_for_pid_change()
self._value = value
self._file.write_value(self._key, self._value)
def get(self):
with lock:
self.__check_for_pid_change()
return self._value
return MmapedValue
def get_value_class():
# Should we enable multi-process mode?
# This needs to be chosen before the first metric is constructed,
# and as that may be in some arbitrary library the user/admin has
# no control over we use an environment variable.
if 'prometheus_multiproc_dir' in os.environ or 'PROMETHEUS_MULTIPROC_DIR' in os.environ:
return MultiProcessValue()
else:
return MutexValue
ValueClass = get_value_class()
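# Sketch of how multi-process mode is selected (the directory path is an
# assumption): the environment variable must be set before this module is
# imported, because ValueClass is resolved once at import time, e.g.
#   PROMETHEUS_MULTIPROC_DIR=/tmp/prom_metrics python my_app.py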
|
apache-2.0
| 4,736,157,789,706,635,000
| 34.811475
| 165
| 0.590753
| false
| 4.192898
| false
| false
| false
|
ownport/local-ci
|
local_ci/travis.py
|
1
|
2167
|
# -*- coding: utf-8 -*-
import os
import re
import utils
from dispatchers import BaseDispatcher
BASH_SCRIPT_TEMPLATE='''#!/bin/bash'''
RE_ENV_PATTERN=re.compile(r'^.+?=.+?$')
CI_STAGES = [
'before_install', 'install',
'before_script', 'script',
'after_success', 'after_failure',
'before_deploy', 'deploy', 'after_deploy',
'after_script',
]
SUPPORTED_CI_STAGES = [
'install',
'script',
]
class TravisRepoDispatcher(BaseDispatcher):
def __init__(self, path, settings):
super(TravisRepoDispatcher, self).__init__(path, settings)
self._travisyml_path = os.path.join(self.repo_path, '.travis.yml')
if not os.path.exists(self._travisyml_path):
raise IOError('The file .travis.yml does not exist in the directory %s' % self.repo_path)
self._travisyml = utils.read_yaml(self._travisyml_path)
def docker_images(self):
''' returns the list of docker images
'''
language = self._travisyml.get('language', None)
if not language:
raise RuntimeError("The language variable is missed in configuration files")
versions = self._travisyml.get(language, None)
if not versions:
raise RuntimeError("The variable is missed in configuration file, %s" % language)
return [self.get_docker_image(':'.join((language, str(ver))))
for ver in versions]
def script(self):
''' returns the script for execution in docker container
'''
script = ['#!/bin/sh',]
env_vars = list(self._travisyml.get('env', []))
env_vars.extend(list(self.settings.get('env', [])))
script.extend(['\n# Environment variables'])
script.extend([ "export %s" % e for e in env_vars if RE_ENV_PATTERN.match(e) ])
for stage in SUPPORTED_CI_STAGES:
            stage_actions = self._travisyml.get(stage) or []  # avoid calling .append on None when a stage is missing
if stage == 'install':
stage_actions.append('cd /repo')
if stage_actions:
script.extend(['\n# Stage: %s' % stage,])
script.extend(stage_actions)
return '\n'.join(script)
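# Illustrative sketch (the .travis.yml content below is an assumption) of what
# script() emits for a config such as:
#   language: python
#   python: [2.7]
#   env: [FOO=bar]
#   install: [pip install -r requirements.txt]
#   script: [py.test]
# roughly:
#   #!/bin/sh
#
#   # Environment variables
#   export FOO=bar
#
#   # Stage: install
#   pip install -r requirements.txt
#   cd /repo
#
#   # Stage: script
#   py.test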
|
apache-2.0
| 3,065,606,958,559,906,000
| 28.283784
| 101
| 0.595293
| false
| 3.749135
| false
| false
| false
|
alfa-jor/addon
|
plugin.video.alfa/servers/gvideo.py
|
1
|
2665
|
# -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
        return True, ""  # verification disabled because it gets stuck!
response = httptools.downloadpage(page_url, headers={"Referer": page_url})
global page
page = response
if "no+existe" in response.data or 'no existe.</p>' in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
if "No+tienes+permiso" in response.data:
return False, "[gvideo] No tienes permiso para acceder a este video"
if "Se ha producido un error" in response.data:
return False, "[gvideo] Se ha producido un error en el reproductor de google"
if "No+se+puede+procesar+este" in response.data:
return False, "[gvideo] No se puede procesar este video"
if response.code == 429:
return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info()
video_urls = []
urls = []
    streams = []
logger.debug('page_url: %s'%page_url)
if 'googleusercontent' in page_url:
url = page_url
headers_string = httptools.get_url_headers(page_url, forced=True)
        quality = scrapertools.find_single_match(url, '.itag=(\d+).')
if not quality:
quality = '59'
streams.append((quality, url))
else:
data = page.data
        bloque = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map(.*)')
if bloque:
data = bloque
data = data.decode('unicode-escape', errors='replace')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = httptools.get_url_headers(page_url, forced=True)
streams = scrapertools.find_multiple_matches(data,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags.get(itag, ''), video_url])
urls.append(video_url)
video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
return video_urls
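# The returned value is a list of [quality_label, url] pairs sorted by
# resolution, e.g. (illustrative values, not real URLs):
#   [['360p', 'https://...itag=18...'], ['720p', 'https://...itag=22...']]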
|
gpl-3.0
| 4,788,274,870,246,898,000
| 35.452055
| 122
| 0.606539
| false
| 3.351385
| false
| false
| false
|
ainafp/nilearn
|
plot_haxby_different_estimators.py
|
1
|
5881
|
"""
Different classifiers in decoding the Haxby dataset
=====================================================
Here we compare different classifiers on a visual object recognition
decoding task.
"""
import time
### Fetch data using nilearn dataset fetcher ################################
from nilearn import datasets
data_files = datasets.fetch_haxby(n_subjects=1)
# load labels
import numpy as np
labels = np.recfromcsv(data_files.session_target[0], delimiter=" ")
stimuli = labels['labels']
# identify resting state labels in order to be able to remove them
resting_state = stimuli == "rest"
# find names of remaining active labels
categories = np.unique(stimuli[resting_state == False])
# extract tags indicating to which acquisition run a tag belongs
session_labels = labels["chunks"][resting_state == False]
# Load the fMRI data
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
masker = NiftiMasker(mask=data_files['mask_vt'][0], standardize=True)
masked_timecourses = masker.fit_transform(
data_files.func[0])[resting_state == False]
### Classifiers definition
# A support vector classifier
from sklearn.svm import SVC
svm = SVC(C=1., kernel="linear")
from sklearn.grid_search import GridSearchCV
# GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that
# can significantly speed up the fitting process on computers with
# multiple cores
svm_cv = GridSearchCV(SVC(C=1., kernel="linear"),
param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
scoring='f1')
# The logistic regression
from sklearn.linear_model import LogisticRegression, RidgeClassifier, \
RidgeClassifierCV
logistic = LogisticRegression(C=1., penalty="l1")
logistic_50 = LogisticRegression(C=50., penalty="l1")
logistic_l2 = LogisticRegression(C=1., penalty="l2")
logistic_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"),
param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
scoring='f1')
logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"),
param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
scoring='f1')
ridge = RidgeClassifier()
ridge_cv = RidgeClassifierCV()
# Make a data splitting object for cross validation
from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
cv = LeaveOneLabelOut(session_labels)
classifiers = {'SVC': svm,
'SVC cv': svm_cv,
'log l1': logistic,
'log l1 50': logistic_50,
'log l1 cv': logistic_cv,
'log l2': logistic_l2,
'log l2 cv': logistic_l2_cv,
'ridge': ridge,
'ridge cv': ridge_cv}
classifiers_scores = {}
for classifier_name, classifier in sorted(classifiers.items()):
classifiers_scores[classifier_name] = {}
print 70 * '_'
for category in categories:
classification_target = stimuli[resting_state == False] == category
t0 = time.time()
classifiers_scores[classifier_name][category] = cross_val_score(
classifier,
masked_timecourses,
classification_target,
cv=cv, scoring="f1")
print "%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % (
classifier_name, category,
classifiers_scores[classifier_name][category].mean(),
classifiers_scores[classifier_name][category].std(),
time.time() - t0)
###############################################################################
# make a rudimentary diagram
import matplotlib.pyplot as plt
plt.figure()
tick_position = np.arange(len(categories))
plt.xticks(tick_position, categories, rotation=45)
for color, classifier_name in zip(
['b', 'c', 'm', 'g', 'y', 'k', '.5', 'r', '#ffaaaa'],
sorted(classifiers)):
score_means = [classifiers_scores[classifier_name][category].mean()
for category in categories]
plt.bar(tick_position, score_means, label=classifier_name,
width=.11, color=color)
tick_position = tick_position + .09
plt.ylabel('Classification accuracy (f1 score)')
plt.xlabel('Visual stimuli category')
plt.ylim(ymin=0)
plt.legend(loc='lower center', ncol=3)
plt.title('Category-specific classification accuracy for different classifiers')
plt.tight_layout()
###############################################################################
# Plot the face vs house map for the different estimators
# use the average EPI as a background
from nilearn import image
mean_epi = image.mean_img(data_files.func[0]).get_data()
# Restrict the decoding to face vs house
condition_mask = np.logical_or(stimuli == 'face', stimuli == 'house')
masked_timecourses = masked_timecourses[condition_mask[resting_state == False]]
stimuli = stimuli[condition_mask]
# Transform the stimuli to binary values
stimuli = (stimuli == 'face').astype(np.int)
for classifier_name, classifier in sorted(classifiers.items()):
classifier.fit(masked_timecourses, stimuli)
if hasattr(classifier, 'coef_'):
weights = classifier.coef_[0]
elif hasattr(classifier, 'best_estimator_'):
weights = classifier.best_estimator_.coef_[0]
else:
continue
weight_img = masker.inverse_transform(weights)
weight_map = weight_img.get_data()
plt.figure(figsize=(3, 5))
plt.imshow(np.rot90(mean_epi[..., 27]), interpolation='nearest',
cmap=plt.cm.gray)
vmax = max(-weight_map.min(), weight_map.max())
plt.imshow(np.rot90(
np.ma.masked_inside(weight_map[..., 27], -.001*vmax, .001*vmax)),
interpolation='nearest', vmax=vmax, vmin=-vmax)
plt.axis('off')
plt.title('%s: face vs house' % classifier_name)
plt.tight_layout()
plt.show()
|
bsd-3-clause
| -6,053,872,600,998,459,000
| 34.215569
| 80
| 0.627444
| false
| 3.791747
| false
| false
| false
|
doirisks/dori
|
models/10.1001:archinte.167.10.1068/model_f.py
|
1
|
4027
|
"""
model_f.py
by Ted Morin
contains a function to predict 8-year Diabtes Mellitus risks beta coefficients and logistic model from
10.1001/archinte.167.10.1068
2007 Prediction of Incident Diabetes Mellitus in Middle Aged Adults
Framingham Heart Study
(Table 5, Complex Model 2)
function expects parameters of:
"Male Sex" "Age" "Systolic BP" "Diastolic BP" "BMI" "Waist Circumf" "HDL-C" "Triglycerides" "Fasting Glucose"
years mm Hg mm Hg kg/m^2 cm mg/dL mg/dL mg/dL
bool int/float int/float int/float int/float i/f i/f i/f i/f
function expects parameters of (continued):
"Parental History of DM" "Antihypertensive Medication Use" "Gutt Insulin Sensitivity Index"
bool bool float/int
"""
# COMPLEX MODELS ARE INCOMPLETE: UNCHECKED + PERCENTILE VALUES NOT LISTED
def model(ismale,age,sbp,dbp,bmi,waistcirc,hdl,tri,glucose,parent,trtbp, guttinsul):
# imports
import numpy as np
# betas
# derived from Odds Ratios in paper
betas = np.array([
        -5.427, #Intercept
0, #Age<50
-0.0043648054, #Age 50-64
-0.0915149811, #Age >=65
0.0492180227, #Male
0.2380461031, #Parental history of diabetes mellitus
0, #BMI <25
0.0681858617, #BMI 25.0-29.9
0.2552725051, #BMI >=30
0.1461280357, #Blood pressure >130/85 mm Hg or receiving therapy
0.3384564936, #HDL-C level <40 mg/dL in men or <50 mg/dL in women
0.1760912591, #Triglyceride level >=150 mg/dL
0.096910013, #Waist circumference >88 cm in women or >102 cm in men
0.7259116323, #Fasting glucose level 100-126 mg/dL
0, #2-Hour OGTT finding 140-200 mg/dL # Not Included
0, #Fasting insulin level >75th percentile # Not Included
0, #C-reactive protein level >75th percentile # Not Included
0.357934847, #Log Gutt insulin sensitivity index <25th percentile # TODO impossible?
0, #Log HOMA insulin resistance index >75th percentile # Not Included
0, #HOMA beta-cell index <25th percentile # Not Included
])
# determining factors:
values = [0]*20
values[0] = 1
# age
if age < 50:
values[1] = 1
    elif age < 65:  # Age 50-64
        values[2] = 1
    else:
values[3] = 1
# sex
if ismale:
values[4] = 1
# parental history
if parent:
values[5] = 1
# BMI
if bmi < 25.:
values[6] = 1
elif bmi < 30.:
values[7] = 1
else :
values[8] = 1
# blood pressure
if ((sbp >= 130.) or (dbp >= 85.) or trtbp) :
values[9] = 1
# HDL-C
if ismale and hdl < 40:
values[10] = 1
elif (not ismale) and hdl < 50:
values[10] = 1
# Triglycerides
if tri >= 150:
values[11] = 1
# Waist Circumference
if ismale and waistcirc > 102:
values[12] = 1
elif (not ismale) and waistcirc > 88:
values[12] = 1
# Fasting glucose
if glucose >= 100:
values[13] = 1
# Log GUTT insulin sensitivity index
guttinsul = np.log(guttinsul)
crit_guttinsul = -1000000 # real value not known TODO
if guttinsul < crit_guttinsul:
values[17] = 1
# dot betas and values
z = np.dot(betas,np.array(values))
# calculate risk
return 1.0 / (1 + np.exp(-z))
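# Minimal smoke test, guarded so importing this module stays side-effect free.
# All input values below are hypothetical and are not taken from the paper;
# this only demonstrates how the function is called.
if __name__ == "__main__":
    risk = model(ismale=True, age=55, sbp=135, dbp=88, bmi=31.0, waistcirc=105,
                 hdl=38, tri=160, glucose=110, parent=True, trtbp=False,
                 guttinsul=3.2)
    print("8-year DM risk for the illustrative inputs: {0:.3f}".format(risk))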
|
gpl-3.0
| 3,382,809,701,925,434,000
| 34.307018
| 111
| 0.503602
| false
| 3.411017
| false
| false
| false
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/vpc/suite_setup.py
|
1
|
4050
|
'''
setup virtual router suite environment, including start zstack node, deploy
initial database, setup vlan devices.
@author: Frank
'''
import os
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import zstackwoodpecker.operations.scenario_operations as scenario_operations
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.config_operations as config_operations
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.config_operations as conf_ops
USER_PATH = os.path.expanduser('~')
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
EXTRA_HOST_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_host_setup_config.sh' % USER_PATH
def test():
if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
test_util.test_skip('Suite Setup Success')
if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)
nic_name = "eth0"
if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
nic_name = "zsn0"
linux.create_vlan_eth(nic_name, 1010)
linux.create_vlan_eth(nic_name, 1011)
    #This vlan creation is not a must if testing is under a nested virt env. But it is required on a physical host without enough physical network devices, when your test execution machine is not the same one as the Host machine.
#linux.create_vlan_eth("eth0", 10, "10.0.0.200", "255.255.255.0")
#linux.create_vlan_eth("eth0", 11, "10.0.1.200", "255.255.255.0")
#no matter if current host is a ZStest host, we need to create 2 vlan devs for future testing connection for novlan test cases.
linux.create_vlan_eth(nic_name, 10)
linux.create_vlan_eth(nic_name, 11)
    #If the test execution machine is not the same one as the Host machine, the deploy work needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent), and it can not directly call SetupAction.run()
test_lib.setup_plan.deploy_test_agent()
cmd = host_plugin.CreateVlanDeviceCmd()
hosts = test_lib.lib_get_all_hosts_from_plan()
if type(hosts) != type([]):
hosts = [hosts]
for host in hosts:
cmd.ethname = nic_name
cmd.vlan = 10
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
cmd.vlan = 11
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
test_lib.setup_plan.execute_plan_without_deploy_test_agent()
conf_ops.change_global_config("applianceVm", "agent.deployOnStart", 'true')
if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
for host in hosts:
os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))
delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
delete_policy = test_lib.lib_set_delete_policy('image', 'Direct')
# if test_lib.lib_get_ha_selffencer_maxattempts() != None:
# test_lib.lib_set_ha_selffencer_maxattempts('60')
# test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
test_util.test_pass('Suite Setup Success')
|
apache-2.0
| -1,399,914,541,662,224,000
| 53.479452
| 221
| 0.716543
| false
| 3.316953
| true
| false
| false
|
tidalf/plugin.audio.qobuz
|
resources/lib/qobuz/node/similar_artist.py
|
1
|
1695
|
'''
qobuz.node.similar_artist
~~~~~~~~~~~~~~~~~~~~~~~~~
:part_of: kodi-qobuz
:copyright: (c) 2012-2018 by Joachim Basmaison, Cyril Leclerc
:license: GPLv3, see LICENSE for more details.
'''
from qobuz import config
from qobuz.api import api
from qobuz.gui.util import lang
from qobuz.node import getNode, Flag, helper
from qobuz.node.inode import INode
from qobuz.debug import getLogger
logger = getLogger(__name__)
class Node_similar_artist(INode):
def __init__(self, parent=None, parameters=None, data=None):
parameters = {} if parameters is None else parameters
super(Node_similar_artist, self).__init__(
parent=parent, parameters=parameters, data=data)
self.nt = Flag.SIMILAR_ARTIST
self.content_type = 'artists'
self.lang = lang(30156)
def fetch(self, options=None):
return api.get('/artist/getSimilarArtists',
artist_id=self.nid,
offset=self.offset,
limit=self.limit)
def _count(self):
return len(self.data['artists']['items'])
def populate(self, options=None):
skip_empty = not config.app.registry.get(
'display_artist_without_album', to='bool')
for data in self.data['artists']['items']:
if skip_empty and data['albums_count'] < 1:
continue
artist = getNode(Flag.ARTIST, data=data)
cache = artist.fetch(helper.TreeTraverseOpts(lvl=3,noRemote=True))
if cache is not None:
artist.data = cache
self.add_child(artist)
return True if len(self.data['artists']['items']) > 0 else False
|
gpl-3.0
| 2,751,515,988,515,557,400
| 34.3125
| 78
| 0.60472
| false
| 3.660907
| false
| false
| false
|
repleo/bounca
|
api/urls.py
|
1
|
1469
|
"""API v1 end-points"""
from django.conf.urls import include, url
from rest_auth.registration.urls import urlpatterns as urlpatterns_registration
from rest_auth.urls import urlpatterns as urlpatterns_rest_auth
from rest_framework_swagger.views import get_swagger_view
from .views import (
CertificateCRLFileView, CertificateCRLView, CertificateFilesView, CertificateInfoView, CertificateInstanceView,
CertificateListView, CertificateRevokeView)
urlpatterns_apiv1 = [
url(r'^certificates/files/(?P<pk>[\d]+)$', CertificateFilesView.as_view(), name='certificate-files'),
url(r'^certificates/crl/(?P<pk>[\d]+)$', CertificateCRLView.as_view(), name='certificate-crl'),
url(r'^certificates/crlfile/(?P<pk>[\d]+)$', CertificateCRLFileView.as_view(), name='certificate-crl-file'),
url(r'^certificates/(?P<pk>[\d]+)$', CertificateInstanceView.as_view(), name='certificate-instance'),
url(r'^certificates/info/(?P<pk>[\d]+)$', CertificateInfoView.as_view(), name='certificate-info'),
url(r'^certificates/revoke/(?P<pk>[\d]+)$', CertificateRevokeView.as_view(), name='certificate-revoke'),
url(r'^certificates', CertificateListView.as_view(), name='certificates'),
url(r'^auth/', include(urlpatterns_rest_auth)),
url(r'^auth/registration/', include(urlpatterns_registration))
]
schema_view = get_swagger_view(title='BounCA API')
urlpatterns = [
url(r'^v1/', include(urlpatterns_apiv1)),
url(r'docs/', schema_view),
]
|
apache-2.0
| 38,793,706,840,446,420
| 42.205882
| 115
| 0.720218
| false
| 3.618227
| false
| true
| false
|
NMGRL/pychron
|
pychron/gis/views.py
|
1
|
2459
|
# ===============================================================================
# Copyright 2020 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from traitsui.api import View, Item, UItem, HGroup, Heading, spring, FileEditor
from traitsui.editors import InstanceEditor, ListEditor
from traitsui.group import VGroup
from pychron.core.pychron_traits import BorderVGroup
from pychron.options.options import SubOptions, GroupSubOptions as _GroupSubOptions
from pychron.paths import paths
from pychron.pychron_constants import MAIN
class MainView(SubOptions):
def traits_view(self):
v = View(BorderVGroup(Item('basemap_uri_template', label='Base Map URI'),
label='Web Map Services'),
HGroup(spring, Heading('or'), spring),
BorderVGroup(Item('basemap_path', editor=FileEditor(root_path=paths.data_dir)),
label='Local Raster'),
UItem('basemap_uri', style='custom'))
return v
# class AppearanceView(SubOptions):
# def traits_view(self):
# v = View(BorderVGroup(Item('symbol_size'),
# Item('symbol_kind'),
# Item('symbol_color')))
# return v
class GroupSubOptions(_GroupSubOptions):
def traits_view(self):
g = self._make_group()
return self._make_view(g)
class LayersSubOptions(SubOptions):
def traits_view(self):
v = View(VGroup(HGroup(UItem('add_layer_button')),
UItem('layers', editor=ListEditor(mutable=True, style='custom',
editor=InstanceEditor()))))
return v
VIEWS = {MAIN.lower(): MainView,
'groups': GroupSubOptions,
'layers': LayersSubOptions}
# ============= EOF =============================================
|
apache-2.0
| -7,972,781,191,736,847,000
| 36.830769
| 96
| 0.583571
| false
| 4.329225
| false
| false
| false
|
linostar/timeline-clone
|
source/timelinelib/wxgui/component.py
|
1
|
2662
|
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from timelinelib.db import db_open
from timelinelib.wxgui.components.timeline import TimelinePanel
class DummyConfig(object):
def __init__(self):
self.window_size = (100, 100)
self.window_pos = (100, 100)
self.window_maximized = False
self.show_sidebar = True
self.show_legend = True
self.sidebar_width = 200
self.recently_opened = []
self.open_recent_at_startup = False
self.balloon_on_hover = True
self.week_start = "monday"
self.use_inertial_scrolling = False
def get_sidebar_width(self):
return self.sidebar_width
def get_show_sidebar(self):
return self.show_sidebar
def get_show_legend(self):
return self.show_legend
def get_balloon_on_hover(self):
return self.balloon_on_hover
class DummyStatusBarAdapter(object):
def set_text(self, text):
pass
def set_hidden_event_count_text(self, text):
pass
def set_read_only_text(self, text):
pass
class DummyMainFrame(object):
def enable_disable_menus(self):
pass
def edit_ends(self):
pass
def ok_to_edit(self):
return False
class TimelineComponent(TimelinePanel):
def __init__(self, parent):
TimelinePanel.__init__(
self, parent, DummyConfig(), self.handle_db_error,
DummyStatusBarAdapter(), DummyMainFrame())
self.activated()
def handle_db_error(self, e):
pass
def open_timeline(self, path):
timeline = db_open(path)
self.timeline_canvas.set_timeline(timeline)
self.sidebar.category_tree.set_timeline_view(
self.timeline_canvas.get_timeline(),
self.timeline_canvas.get_view_properties()
)
def clear_timeline(self):
self.timeline_canvas.set_timeline(None)
self.sidebar.category_tree.set_no_timeline_view()
|
gpl-3.0
| 7,427,410,965,997,319,000
| 26.163265
| 70
| 0.661533
| false
| 3.797432
| false
| false
| false
|
2gis/stf-utils
|
stf_utils/stf_record/protocol.py
|
1
|
2324
|
import time
import logging
from autobahn.asyncio.websocket import WebSocketClientProtocol
log = logging.getLogger(__name__)
class STFRecordProtocol(WebSocketClientProtocol):
img_directory = None
address = None
resolution = None
def __init__(self):
super().__init__()
self.first_msg_timestamp = None
self.previous_msg_timestamp = None
self.current_msg_timestamp = None
def _construct_img_filename(self):
img_filename = "{0}.jpg".format(
self.current_msg_timestamp - self.first_msg_timestamp
)
return img_filename
@staticmethod
def _write_image_file(img_filename, binary_data):
with open(img_filename, 'bw+') as file:
log.debug('Writing image data to file {0}'.format(file.name))
file.write(binary_data)
def _write_metadata(self, img_filename):
metadata_filename = "{0}/input.txt".format(self.img_directory)
m_file = open(metadata_filename, 'a')
log.debug('Appending image metadata to file {0}'.format(m_file.name))
if self.previous_msg_timestamp is not None:
duration = self.current_msg_timestamp - self.previous_msg_timestamp
m_file.write("duration {0}\n".format(duration))
m_file.write("file '{0}'\n".format(img_filename))
m_file.close()
def save_data_and_metadata(self, binary_data):
img_filename = self._construct_img_filename()
self._write_image_file("{0}/{1}".format(self.img_directory, img_filename), binary_data)
self._write_metadata(img_filename)
def onOpen(self):
log.debug('Starting receive binary data')
if self.resolution:
self.sendMessage(self.resolution.encode('ascii'))
self.sendMessage('on'.encode('ascii'))
def onMessage(self, payload, isBinary):
if isBinary:
self.current_msg_timestamp = time.time()
if self.previous_msg_timestamp is None:
self.first_msg_timestamp = self.current_msg_timestamp
self.save_data_and_metadata(payload)
self.previous_msg_timestamp = self.current_msg_timestamp
def onClose(self, wasClean, code, reason):
log.debug('Disconnecting {0} ...'.format(self.address))
self.sendMessage('off'.encode('ascii'))
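# For reference, the metadata appended to "<img_directory>/input.txt" by
# _write_metadata() ends up looking like the sketch below (timestamps are
# illustrative). The layout matches what an ffmpeg concat-style list expects,
# which is presumably how the saved frames are later assembled into a video:
#
#   file '0.0.jpg'
#   duration 0.2104268550872803
#   file '0.2104268550872803.jpg'
#   duration 0.1987651824951172
#   file '0.4091920375823975.jpg'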
|
mit
| 4,412,727,215,913,162,000
| 36.483871
| 95
| 0.636403
| false
| 3.886288
| false
| false
| false
|
dls-controls/pymalcolm
|
tests/test_modules/test_builtin/test_basiccontroller.py
|
1
|
1213
|
import unittest
from malcolm.core import Alarm, AlarmSeverity, Process
from malcolm.modules.builtin.controllers import BasicController
from malcolm.modules.builtin.infos import HealthInfo
class TestBasicController(unittest.TestCase):
def setUp(self):
self.process = Process("proc")
self.o = BasicController("MyMRI")
self.process.add_controller(self.o)
self.process.start()
self.b = self.process.block_view("MyMRI")
def tearDown(self):
self.process.stop(timeout=2)
def update_health(self, num, alarm=Alarm.ok):
self.o.update_health(num, HealthInfo(alarm))
def test_set_health(self):
self.update_health(1, Alarm(severity=AlarmSeverity.MINOR_ALARM))
self.update_health(2, Alarm(severity=AlarmSeverity.MAJOR_ALARM))
assert self.b.health.alarm.severity == AlarmSeverity.MAJOR_ALARM
self.update_health(1, Alarm(severity=AlarmSeverity.UNDEFINED_ALARM))
self.update_health(2, Alarm(severity=AlarmSeverity.INVALID_ALARM))
assert self.b.health.alarm.severity == AlarmSeverity.UNDEFINED_ALARM
self.update_health(1)
self.update_health(2)
assert self.o.health.value == "OK"
|
apache-2.0
| 2,259,333,449,535,143,000
| 35.757576
| 76
| 0.700742
| false
| 3.407303
| false
| false
| false
|
kontza/sigal
|
tests/test_encrypt.py
|
1
|
2796
|
import os
import pickle
from io import BytesIO
from sigal import init_plugins
from sigal.gallery import Gallery
from sigal.plugins.encrypt import endec
from sigal.plugins.encrypt.encrypt import cache_key
CURRENT_DIR = os.path.dirname(__file__)
def get_key_tag(settings):
options = settings["encrypt_options"]
key = endec.kdf_gen_key(
options["password"],
options["kdf_salt"],
options["kdf_iters"]
)
tag = options["gcm_tag"].encode("utf-8")
return (key, tag)
def test_encrypt(settings, tmpdir, disconnect_signals):
settings['destination'] = str(tmpdir)
if "sigal.plugins.encrypt" not in settings["plugins"]:
settings['plugins'] += ["sigal.plugins.encrypt"]
settings['encrypt_options'] = {
'password': 'password',
'ask_password': True,
'gcm_tag': 'AuTheNTiCatIoNtAG',
'kdf_salt': 'saltysaltsweetysweet',
'kdf_iters': 10000,
'encrypt_symlinked_originals': False
}
init_plugins(settings)
gal = Gallery(settings)
gal.build()
# check the encrypt cache exists
cachePath = os.path.join(settings["destination"], ".encryptCache")
assert os.path.isfile(cachePath)
encryptCache = None
with open(cachePath, "rb") as cacheFile:
encryptCache = pickle.load(cacheFile)
assert isinstance(encryptCache, dict)
testAlbum = gal.albums["encryptTest"]
key, tag = get_key_tag(settings)
for media in testAlbum:
# check if sizes are stored in cache
assert cache_key(media) in encryptCache
assert "size" in encryptCache[cache_key(media)]
assert "thumb_size" in encryptCache[cache_key(media)]
assert "encrypted" in encryptCache[cache_key(media)]
encryptedImages = [
media.dst_path,
media.thumb_path
]
if settings["keep_orig"]:
encryptedImages.append(os.path.join(settings["destination"],
media.path, media.big))
# check if images are encrypted by trying to decrypt
for image in encryptedImages:
with open(image, "rb") as infile:
with BytesIO() as outfile:
endec.decrypt(key, infile, outfile, tag)
# check static files have been copied
static = os.path.join(settings["destination"], 'static')
assert os.path.isfile(os.path.join(static, "decrypt.js"))
assert os.path.isfile(os.path.join(static, "keycheck.txt"))
assert os.path.isfile(os.path.join(settings["destination"], "sw.js"))
# check keycheck file
with open(os.path.join(settings["destination"],
'static', "keycheck.txt"), "rb") as infile:
with BytesIO() as outfile:
endec.decrypt(key, infile, outfile, tag)
|
mit
| 7,785,521,272,187,205,000
| 32.285714
| 73
| 0.629471
| false
| 3.856552
| false
| false
| false
|
mohierf/bottle-webui
|
alignak_webui/objects/item_hostgroup.py
|
1
|
3392
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Many functions need to use protected members of a base class
# pylint: disable=protected-access
# Attributes need to be defined in constructor before initialization
# pylint: disable=attribute-defined-outside-init
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
#
# This file is part of (WebUI).
#
# (WebUI) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (WebUI) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (WebUI). If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the classes used to manage the application objects with the data manager.
"""
from alignak_webui.objects.element import BackendElement
class HostGroup(BackendElement):
"""
Object representing a hostgroup
"""
_count = 0
# Next value used for auto generated id
_next_id = 1
# _type stands for Backend Object Type
_type = 'hostgroup'
# _cache is a list of created objects
_cache = {}
# Converting real state identifier to text status
overall_state_to_status = [
'ok', 'acknowledged', 'in_downtime', 'warning', 'critical'
]
def __init__(self, params=None, date_format='%a, %d %b %Y %H:%M:%S %Z', embedded=True):
"""
Create a hostgroup (called only once when an object is newly created)
"""
self._linked_hostgroups = 'hostgroup'
self._linked__parent = 'hostgroup'
self._linked_hosts = 'host'
super(HostGroup, self).__init__(params, date_format, embedded)
if not hasattr(self, '_overall_state'):
setattr(self, '_overall_state', 0)
@property
def members(self):
""" Return linked object """
return self._linked_hosts
@property
def hosts(self):
""" Return linked object """
return self._linked_hosts
@property
def hostgroups(self):
""" Return linked object """
return self._linked_hostgroups
@property
def _parent(self):
""" Return group parent """
return self._linked__parent
@property
def level(self):
""" Return group level """
if not hasattr(self, '_level'):
return -1
return self._level
# @property
# def status(self):
# """Return real status string from the real state identifier"""
# return self.overall_state
#
# @property
# def overall_state(self):
# """Return real state identifier"""
# return self._overall_state
#
# @overall_state.setter
# def overall_state(self, overall_state):
# """
# Set Item object overall_state
# """
# self._overall_state = overall_state
#
# @property
# def overall_status(self):
# """Return real status string from the real state identifier"""
# return self.overall_state_to_status[self._overall_state]
|
agpl-3.0
| -7,640,984,566,309,661,000
| 30.119266
| 98
| 0.637972
| false
| 4.009456
| false
| false
| false
|
mitsuhiko/sentry
|
src/sentry/db/models/base.py
|
1
|
3272
|
"""
sentry.db.models
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django.db import models
from django.db.models import signals
from .fields.bounded import BoundedBigAutoField
from .manager import BaseManager
from .query import update
__all__ = ('BaseModel', 'Model', 'sane_repr')
UNSAVED = object()
def sane_repr(*attrs):
if 'id' not in attrs and 'pk' not in attrs:
attrs = ('id',) + attrs
def _repr(self):
cls = type(self).__name__
pairs = (
'%s=%s' % (a, repr(getattr(self, a, None)))
for a in attrs)
return u'<%s at 0x%x: %s>' % (cls, id(self), ', '.join(pairs))
return _repr
class BaseModel(models.Model):
class Meta:
abstract = True
objects = BaseManager()
update = update
def __init__(self, *args, **kwargs):
super(BaseModel, self).__init__(*args, **kwargs)
self._update_tracked_data()
def __getstate__(self):
d = self.__dict__.copy()
# we cant serialize weakrefs
d.pop('_Model__data', None)
return d
def __reduce__(self):
(model_unpickle, stuff, _) = super(BaseModel, self).__reduce__()
return (model_unpickle, stuff, self.__getstate__())
def __setstate__(self, state):
self.__dict__.update(state)
self._update_tracked_data()
def __get_field_value(self, field):
if isinstance(field, models.ForeignKey):
return getattr(self, field.column, None)
return getattr(self, field.name, None)
def _update_tracked_data(self):
"Updates a local copy of attributes values"
if self.id:
data = {}
for f in self._meta.fields:
try:
data[f.column] = self.__get_field_value(f)
except AttributeError as e:
# this case can come up from pickling
logging.exception(unicode(e))
self.__data = data
else:
self.__data = UNSAVED
def has_changed(self, field_name):
"Returns ``True`` if ``field`` has changed since initialization."
if self.__data is UNSAVED:
return False
field = self._meta.get_field(field_name)
return self.__data.get(field_name) != self.__get_field_value(field)
def old_value(self, field_name):
"Returns the previous value of ``field``"
if self.__data is UNSAVED:
return None
return self.__data.get(field_name)
class Model(BaseModel):
id = BoundedBigAutoField(primary_key=True)
class Meta:
abstract = True
__repr__ = sane_repr('id')
def __model_post_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_tracked_data()
def __model_class_prepared(sender, **kwargs):
if not issubclass(sender, BaseModel):
return
if not hasattr(sender, '__core__'):
raise ValueError('{!r} model has not defined __core__'.format(sender))
signals.post_save.connect(__model_post_save)
signals.class_prepared.connect(__model_class_prepared)
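# Usage sketch (illustrative only; "MyModel" is a hypothetical subclass):
#
#   class MyModel(Model):
#       name = models.CharField(max_length=64)
#       __repr__ = sane_repr('name')
#
#   obj = MyModel.objects.get(id=1)   # snapshot taken via _update_tracked_data()
#   obj.name = 'changed'
#   obj.has_changed('name')           # True, compared against the snapshot
#   obj.old_value('name')             # the value loaded from the database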
|
bsd-3-clause
| 4,136,426,230,593,140,000
| 25.387097
| 78
| 0.585269
| false
| 3.853946
| false
| false
| false
|
florian-f/sklearn
|
sklearn/linear_model/coordinate_descent.py
|
1
|
47058
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD Style.
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel
from ..base import RegressorMixin
from .base import sparse_center_data, center_data
from ..utils import array2d, atleast2d_or_csc, deprecated
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept: bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
copy_X : boolean, optional, default False
If ``True``, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive: bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``dual_gap_`` : float | array, shape = (n_targets,)
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := ``dual_gap_``
``eps_`` : float | array, shape = (n_targets,)
``eps_`` is used to check if the fit converged to the requested
``tol``
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
rho=None):
self.alpha = alpha
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
def fit(self, X, y, Xy=None, coef_init=None):
"""Fit model with coordinate descent
Parameters
-----------
X: ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y: ndarray, shape = (n_samples,) or (n_samples, n_targets)
Target
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
coef_init: ndarray of shape n_features or (n_targets, n_features)
            The initial coefficients to warm-start the optimization
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
# now all computation with X can be done inplace
fit = self._sparse_fit if sparse.isspmatrix(X) else self._dense_fit
fit(X, y, Xy, coef_init)
return self
def _dense_fit(self, X, y, Xy=None, coef_init=None):
# copy was done in fit if necessary
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
precompute = self.precompute
if hasattr(precompute, '__array__') \
and not np.allclose(X_mean, np.zeros(n_features)) \
and not np.allclose(X_std, np.ones(n_features)):
# recompute Gram
precompute = 'auto'
Xy = None
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
# precompute if n_samples > n_features
if precompute == "auto" and n_samples > n_features:
precompute = True
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute:
Gram = np.dot(X.T, X)
else:
Gram = None
for k in xrange(n_targets):
if Gram is None:
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent(
coef_[k, :], l1_reg, l2_reg, X, y[:, k], self.max_iter,
self.tol, self.positive)
else:
Gram = Gram.copy()
if Xy is None:
this_Xy = np.dot(X.T, y[:, k])
else:
this_Xy = Xy[:, k]
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent_gram(
coef_[k, :], l1_reg, l2_reg, Gram, this_Xy, y[:, k],
self.max_iter, self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
(coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _sparse_fit(self, X, y, Xy=None, coef_init=None):
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"Note: Sparse matrices cannot be indexed w/" +
"boolean masks (use `indices=True` in CV).")
# NOTE: we are explicitly not centering the data the naive way to
# avoid breaking the sparsity of X
X_data, y, X_mean, y_mean, X_std = sparse_center_data(
X, y, self.fit_intercept, self.normalize)
if y.ndim == 1:
y = y[:, np.newaxis]
n_samples, n_features = X.shape[0], X.shape[1]
n_targets = y.shape[1]
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
for k in xrange(n_targets):
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.sparse_enet_coordinate_descent(
coef_[k, :], l1_reg, l2_reg, X_data, X.indices,
X.indptr, y[:, k], X_mean / X_std,
self.max_iter, self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
(coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _init_coef(self, coef_init, n_features, n_targets):
if coef_init is None:
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64)
else:
coef_ = self.coef_
else:
coef_ = coef_init
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
if coef_.shape != (n_targets, n_features):
raise ValueError("X and coef_init have incompatible "
"shapes (%s != %s)."
% (coef_.shape, (n_targets, n_features)))
return coef_
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self).decision_function(X)
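# Illustrative usage of ElasticNet (toy data, values chosen arbitrarily),
# alongside the Lasso doctest further below:
#
#   enet = ElasticNet(alpha=0.1, l1_ratio=0.7)
#   enet.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
#   enet.predict([[1.5, 1.5]])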
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``dual_gap_`` : float | array, shape = (n_targets,)
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := ``dual_gap_``
``eps_`` : float | array, shape = (n_targets,)
``eps_`` is used to check if the fit converged to the requested
``tol``
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute='auto', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute='auto', copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive)
###############################################################################
# Classes to store linear models along a regularization path
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, verbose=False,
**params):
"""Compute Lasso path with coordinate descent
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
X : ndarray, shape = (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity
params : kwargs
keyword arguments passed to the Lasso objects
Returns
-------
models : a list of models along the regularization path
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> coef_path = [e.coef_ for e in lasso_path(X, y, alphas=[5., 1., .5], fit_intercept=False)]
>>> print(np.array(coef_path).T)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1], coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, verbose=verbose, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, verbose=False, rho=None,
**params):
"""Compute Elastic-Net path with coordinate descent
The Elastic Net optimization function is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
Parameters
----------
X : ndarray, shape = (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity
params : kwargs
keyword arguments passed to the Lasso objects
Returns
-------
models : a list of models along the regularization path
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
ElasticNet
ElasticNetCV
"""
if rho is not None:
l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=copy_X and fit_intercept)
# From now on X can be touched inplace
if not sparse.isspmatrix(X):
X, y, X_mean, y_mean, X_std = center_data(X, y, fit_intercept,
normalize, copy=False)
# XXX : in the sparse case the data will be centered
# at each fit...
n_samples, n_features = X.shape
if (hasattr(precompute, '__array__')
and not np.allclose(X_mean, np.zeros(n_features))
and not np.allclose(X_std, np.ones(n_features))):
# recompute Gram
precompute = 'auto'
Xy = None
if precompute or ((precompute == 'auto') and (n_samples > n_features)):
if sparse.isspmatrix(X):
warnings.warn("precompute is ignored for sparse data")
precompute = False
else:
precompute = np.dot(X.T, X)
if Xy is None:
Xy = safe_sparse_dot(X.T, y, dense_output=True)
n_samples = X.shape[0]
if alphas is None:
alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
coef_ = None # init coef_
models = []
n_alphas = len(alphas)
for i, alpha in enumerate(alphas):
model = ElasticNet(
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
precompute=precompute)
model.set_params(**params)
model.fit(X, y, coef_init=coef_, Xy=Xy)
if fit_intercept and not sparse.isspmatrix(X):
model.fit_intercept = True
model._set_intercept(X_mean, y_mean, X_std)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
coef_ = model.coef_.copy()
models.append(model)
return models
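# Illustrative call of enet_path, mirroring the toy data used in the lasso_path
# docstring above (parameter values are arbitrary):
#
#   X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
#   y = np.array([1, 2, 3.1])
#   models = enet_path(X, y, l1_ratio=0.5, n_alphas=5, fit_intercept=False)
#   alphas = [m.alpha for m in models]            # decreasing regularization
#   coefs = np.array([m.coef_ for m in models])   # one coefficient vector per alpha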
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1):
this_mses = list()
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
models_train = path(X[train], y[train], **path_params)
this_mses = np.empty(len(models_train))
for i_model, model in enumerate(models_train):
y_ = model.predict(X[test])
this_mses[i_model] = ((y_ - y[test]) ** 2).mean()
return this_mses, l1_ratio
class LinearModelCV(LinearModel):
"""Base class for iterative model fitting along a regularization path"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
# Start to compute path on full data
# XXX: is this really useful: we are fitting models that we won't
# use later
models = self.path(X, y, **path_params)
# Update the alphas list
alphas = [model.alpha for model in models]
n_alphas = len(alphas)
path_params.update({'alphas': alphas, 'n_alphas': n_alphas})
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
all_mse_paths = list()
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
for l1_ratio, mse_alphas in itertools.groupby(
Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_path_residuals)(
X, y, train, test, self.path, path_params,
l1_ratio=l1_ratio)
for l1_ratio in l1_ratios for train, test in folds
), operator.itemgetter(1)):
mse_alphas = [m[0] for m in mse_alphas]
mse_alphas = np.array(mse_alphas)
mse = np.mean(mse_alphas, axis=0)
i_best_alpha = np.argmin(mse)
this_best_mse = mse[i_best_alpha]
all_mse_paths.append(mse_alphas.T)
if this_best_mse < best_mse:
model = models[i_best_alpha]
best_l1_ratio = l1_ratio
if hasattr(model, 'l1_ratio'):
if model.l1_ratio != best_l1_ratio:
# Need to refit the model
model.l1_ratio = best_l1_ratio
model.fit(X, y)
self.l1_ratio_ = model.l1_ratio
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.alpha_ = model.alpha
self.alphas_ = np.asarray(alphas)
self.coef_path_ = np.asarray([model.coef_ for model in models])
self.mse_path_ = np.squeeze(all_mse_paths)
return self
@property
def rho_(self):
warnings.warn("rho was renamed to ``l1_ratio_`` and will be removed "
"in 0.15", DeprecationWarning)
return self.l1_ratio_
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: int, optional
The maximum number of iterations
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific crossvalidation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible objects.
verbose : bool or integer
amount of verbosity
Attributes
----------
``alpha_`` : float
        The amount of penalization chosen by cross validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``mse_path_`` : array, shape = (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
``alphas_`` : numpy array
The grid of alphas used for fitting
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
n_jobs = 1
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose)
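# --- Illustrative usage sketch (not part of the original scikit-learn file) ---
# A minimal example of LassoCV on synthetic data; the shapes, random seed and
# fold count below are assumptions made purely for illustration.
def _lasso_cv_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.asfortranarray(rng.randn(50, 10))  # Fortran order avoids a copy in fit
    w = np.zeros(10)
    w[:3] = [1.5, -2.0, 0.5]  # only three informative features
    y = np.dot(X, w) + 0.01 * rng.randn(50)
    model = LassoCV(cv=3).fit(X, y)  # the alpha grid is chosen automatically
    return model.alpha_, model.coef_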
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or crossvalidation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific crossvalidation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible objects.
verbose : bool or integer
amount of verbosity
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
Attributes
----------
``alpha_`` : float
The amount of penalization chosen by cross-validation
``l1_ratio_`` : float
The compromise between l1 and l2 penalization chosen by
cross-validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
``intercept_`` : float | array, shape = (n_targets,)
Independent term in the decision function.
``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, rho=None):
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
@property
@deprecated("rho was renamed to ``l1_ratio_`` and will be removed "
"in 0.15")
def rho(self):
return self.l1_ratio_
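# --- Illustrative usage sketch (not part of the original scikit-learn file) ---
# Shows the l1_ratio grid recommended in the docstring above and how the
# selected (alpha_, l1_ratio_) pair maps back to separate penalty strengths in
# a * L1 + b * L2; the synthetic data is an assumption for the example only.
def _elastic_net_cv_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] - 2.0 * X[:, 1] + 0.01 * rng.randn(60)
    model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=3).fit(X, y)
    a = model.alpha_ * model.l1_ratio_  # effective L1 strength
    b = model.alpha_ * (1.0 - model.l1_ratio_)  # effective L2 strength
    return a, b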
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty. For l1_ratio = 1 it
is an L1/L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``intercept_`` : array, shape = (n_tasks,)
Independent term in decision function.
``coef_`` : array, shape = (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, rho=None, tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, rho=None):
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
def fit(self, X, y, Xy=None, coef_init=None):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X: ndarray, shape = (n_samples, n_features)
Data
y: ndarray, shape = (n_samples, n_tasks)
Target
coef_init: ndarray of shape n_features
The initial coefficients to warm-start the optimization
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = array2d(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
squeeze_me = False
if y.ndim == 1:
squeeze_me = True
y = y[:, np.newaxis]
n_samples, n_features = X.shape
_, n_tasks = y.shape
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if coef_init is None:
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
else:
self.coef_ = coef_init
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
self.coef_, self.dual_gap_, self.eps_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
# Make sure that the coef_ have the same shape as the given 'y',
# to predict with the same shape
if squeeze_me:
self.coef_ = self.coef_.squeeze()
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
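# --- Illustrative sketch (not part of the original scikit-learn file) ---
# Evaluates the mixed norm ||W||_21 from the docstring above, i.e. the sum of
# the row-wise Euclidean norms; the example matrix is an arbitrary assumption.
def _l21_norm_sketch():
    import numpy as np
    W = np.array([[3.0, 4.0], [0.0, 0.0]])  # rows with norms 5 and 0
    return np.sum(np.sqrt(np.sum(W ** 2, axis=1)))  # -> 5.0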
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``coef_`` : array, shape = (n_tasks, n_features)
parameter vector (W in the cost function formula)
``intercept_`` : array, shape = (n_tasks,)
independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
|
bsd-3-clause
| 7,439,259,739,322,190,000
| 34.515472
| 97
| 0.585596
| false
| 3.90102
| false
| false
| false
|
keflavich/pyspeckit-obsolete
|
examples/n2hp_cube_example.py
|
1
|
2271
|
import pyspeckit
import os
if not os.path.exists('n2hp_cube.fit'):
import astropy.utils.data as aud
from astropy.io import fits
f = aud.download_file('ftp://cdsarc.u-strasbg.fr/pub/cats/J/A%2BA/472/519/fits/opha_n2h.fit')
with fits.open(f) as ff:
ff[0].header['CUNIT3'] = 'm/s'
for kw in ['CTYPE4','CRVAL4','CDELT4','CRPIX4']:
del ff[0].header[kw]
ff.writeto('n2hp_cube.fit')
# Load the spectral cube
spc = pyspeckit.Cube('n2hp_cube.fit')
# Register the fitter
# The N2H+ fitter is 'built-in' but is not registered by default; this example
# shows how to register a fitting procedure
# 'multi' indicates that it is possible to fit multiple components and a
# background will not automatically be fit. 4 is the number of parameters in the
# model (excitation temperature, optical depth, line center, and line width)
spc.Registry.add_fitter('n2hp_vtau',pyspeckit.models.n2hp.n2hp_vtau_fitter,4,multisingle='multi')
# Run the fitter
spc.fiteach(fittype='n2hp_vtau', multifit=True,
guesses=[5,0.5,3,1], # Tex=5K, tau=0.5, v_center=3, width=1 km/s
signal_cut=6, # minimize the # of pixels fit for the example
)
# There are a huge number of parameters for the fiteach procedure. See:
# http://pyspeckit.readthedocs.org/en/latest/example_nh3_cube.html
# http://pyspeckit.readthedocs.org/en/latest/cubes.html?highlight=fiteach#pyspeckit.cubes.SpectralCube.Cube.fiteach
#
# Unfortunately, a complete tutorial on this stuff is on the to-do list;
# right now the use of many of these parameters is at a research level.
# However, pyspeckit@gmail.com will support them! They are being used
# in current and pending publications
# Save the fitted parameters to a FITS file, and overwrite one if one exists
spc.write_fit('n2hp_fitted_parameters.fits', clobber=True)
# Show an integrated image
spc.mapplot()
# This particular cube is a 2x2 image; you can click on any pixel to see its
# spectrum & fit
# plot one of the fitted spectra
spc.plot_spectrum(14,27,plot_fit=True)
# Show an image of the best-fit velocity
spc.mapplot.plane = spc.parcube[2,:,:]
spc.mapplot(estimator=None)
# running in script mode, the figures won't show by default on some systems
import pylab as pl
pl.show()
|
mit
| -5,961,305,820,016,318,000
| 39.553571
| 115
| 0.722589
| false
| 3.011936
| false
| false
| false
|
mrosenstihl/PulsePrograms
|
LED/LED_res.py
|
1
|
1841
|
class ParameterSet:
"""
From
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
Alex Martelli
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def cyclops(timesignal, r_phase, accumulation_object):
"""
This is CYCLOPS phase cycling.
Receiver phase must advance with each step by 90.
Real channel and Imaginary channel get subtracted/added to the Real/Imag channel
of the current accumulation.
"""
if r_phase%4 == 0:# in [0,4,8,12]
ts = timesignal+0
ts.y[0] = timesignal.y[0]
ts.y[1] = timesignal.y[1]
accumulation_object += ts
if (r_phase-1)%4 == 0:#[1,5,9,13]:
ts = timesignal+0
ts.y[0] = -1*timesignal.y[1]
ts.y[1] = timesignal.y[0]
accumulation_object += ts
if (r_phase-2)%4 == 0:#[2,6,10,14]
ts = timesignal+0
ts.y[0] = -1*timesignal.y[0]
ts.y[1] = -1*timesignal.y[1]
accumulation_object += ts
if (r_phase-3)%4 == 0: #in [3,7,11,15]:
ts = timesignal+0
ts.y[0] = timesignal.y[1]
ts.y[1] = -1*timesignal.y[0]
accumulation_object += ts
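# --- Illustrative sketch (not part of the original DAMARIS script) ---
# The branches above multiply the complex signal (y[0] + 1j*y[1]) by
# 1j**r_phase before accumulating.  Assuming the acquired data is rotated by
# -90 degrees per receiver-phase step, the four steps then add coherently
# while constant detector offsets cancel.
def _cyclops_sketch():
    true_signal = 1.0 + 2.0j  # hypothetical single complex data point
    accumulated = 0.0 + 0.0j
    for r_phase in range(4):
        acquired = true_signal * (-1j) ** r_phase  # receiver advanced by 90 deg each step
        accumulated += acquired * 1j ** r_phase  # same rotation cyclops() applies
    return accumulated / 4  # equals true_signal again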
def result():
for res in results:
if not isinstance(res, ADC_Result):
print "ERROR: ", res
continue
descriptions = res.get_description_dictionary()
# rebuild the dictionary because __init__ can't take unicode keys
temp_description={}
for key in descriptions:
temp_description[str(key)] = descriptions[key]
descriptions=temp_description
desc = ParameterSet(**descriptions)
data["Timesignal"]=res
if int(desc.run)%int(desc.accu_length) == 0:
accu=Accumulation()
cyclops(res,int(desc.cyclops),accu)
data["Accu %.1e"%(float(desc.tmix))]=accu
|
bsd-2-clause
| 3,961,073,646,389,585,000
| 31.315789
| 84
| 0.573601
| false
| 3.207317
| false
| false
| false
|
enthought/sandia-data-archive
|
sdafile/tests/test_sda_file.py
|
1
|
26970
|
import io
import os
import random
import shutil
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from sdafile.exceptions import BadSDAFile
from sdafile.sda_file import SDAFile
from sdafile.testing import (
BAD_ATTRS, GOOD_ATTRS, MockRecordInserter, TEST_NUMERIC, TEST_CHARACTER,
TEST_LOGICAL, TEST_SPARSE, TEST_SPARSE_COMPLEX, TEST_CELL, TEST_STRUCTURE,
TEST_UNSUPPORTED, data_path, temporary_file, temporary_h5file
)
from sdafile.utils import (
get_decoded, get_record_type, set_encoded, write_header,
)
class TestSDAFileInit(unittest.TestCase):
def test_mode_r(self):
self.assertInitNew('r', exc=IOError)
self.assertInitExisting('r', {}, BadSDAFile)
self.assertInitExisting('r', BAD_ATTRS, BadSDAFile)
self.assertInitExisting('r', GOOD_ATTRS)
def test_mode_r_plus(self):
self.assertInitNew('r+', exc=IOError)
self.assertInitExisting('r+', exc=BadSDAFile)
self.assertInitExisting('r+', exc=BadSDAFile)
self.assertInitExisting('r+', BAD_ATTRS, BadSDAFile)
self.assertInitExisting('r+', GOOD_ATTRS)
def test_mode_w(self):
self.assertInitNew('w')
self.assertInitExisting('w')
def test_mode_x(self):
self.assertInitNew('x')
self.assertInitExisting('x', exc=IOError)
def test_mode_w_minus(self):
self.assertInitNew('w-')
self.assertInitExisting('w-', exc=IOError)
def test_mode_a(self):
self.assertInitNew('a')
self.assertInitExisting('a', GOOD_ATTRS)
self.assertInitExisting('a', BAD_ATTRS, BadSDAFile)
self.assertInitExisting('a', {}, BadSDAFile)
def test_mode_default(self):
with temporary_h5file() as h5file:
name = h5file.filename
set_encoded(h5file.attrs, **GOOD_ATTRS)
h5file.close()
sda_file = SDAFile(name)
self.assertEqual(sda_file.mode, 'a')
def test_pass_kw(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w', driver='core')
with sda_file._h5file('r') as h5file:
self.assertEqual(h5file.driver, 'core')
def assertAttrs(self, sda_file, attrs={}):
""" Assert sda_file attributes are equal to passed values.
If ``attrs`` is empty, check that the attributes take on the default values.
"""
if attrs == {}: # treat as if new
self.assertEqual(sda_file.Created, sda_file.Updated)
attrs = {}
write_header(attrs)
del attrs['Created']
del attrs['Updated']
attrs = get_decoded(attrs)
for attr, expected in attrs.items():
actual = getattr(sda_file, attr)
self.assertEqual(actual, expected)
def assertInitExisting(self, mode, attrs={}, exc=None):
""" Assert attributes or error when init with existing file.
Passed ``attrs`` are used when creating the existing file. When ``exc``
is None, this also tests that the ``attrs`` are preserved.
"""
with temporary_h5file() as h5file:
name = h5file.filename
if attrs is not None and len(attrs) > 0:
set_encoded(h5file.attrs, **attrs)
h5file.close()
if exc is not None:
with self.assertRaises(exc):
SDAFile(name, mode)
else:
sda_file = SDAFile(name, mode)
self.assertAttrs(sda_file, attrs)
def assertInitNew(self, mode, attrs={}, exc=None):
""" Assert attributes or error when init with non-existing file. """
with temporary_file() as file_path:
os.remove(file_path)
if exc is not None:
with self.assertRaises(exc):
SDAFile(file_path, mode)
else:
sda_file = SDAFile(file_path, mode)
self.assertAttrs(sda_file)
class TestSDAFileProperties(unittest.TestCase):
def test_file_properties(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
self.assertEqual(sda_file.mode, 'w')
self.assertEqual(sda_file.name, file_path)
def test_set_writable(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
self.assertEqual(sda_file.Writable, 'yes')
sda_file.Writable = 'no'
self.assertEqual(sda_file.Writable, 'no')
with self.assertRaises(ValueError):
sda_file.Writable = True
with self.assertRaises(ValueError):
sda_file.Writable = False
sda_file = SDAFile(file_path, 'r')
with self.assertRaises(ValueError):
sda_file.Writable = 'yes'
class TestSDAFileInsert(unittest.TestCase):
def test_read_only(self):
with temporary_h5file() as h5file:
name = h5file.filename
set_encoded(h5file.attrs, **GOOD_ATTRS)
h5file.close()
sda_file = SDAFile(name, 'r')
with self.assertRaises(IOError):
sda_file.insert('test', [1, 2, 3])
def test_no_write(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.Writable = 'no'
with self.assertRaises(IOError):
sda_file.insert('test', [1, 2, 3])
def test_invalid_deflate(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3], deflate=-1)
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3], deflate=10)
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3], deflate=None)
def test_invalid_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.insert('test/', [1, 2, 3])
with self.assertRaises(ValueError):
sda_file.insert('test\\', [1, 2, 3])
def test_label_exists(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', [1, 2, 3])
with self.assertRaises(ValueError):
sda_file.insert('test', [1, 2, 3])
def test_timestamp_update(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.insert('test', [0, 1, 2])
self.assertNotEqual(sda_file.Updated, 'Unmodified')
def test_invalid_structure_key(self):
record = [0, 1, 2, {' bad': np.arange(4)}]
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.insert('something_bad', record)
self.assertEqual(sda_file.labels(), [])
def test_insert_called(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
called = []
sda_file._registry._inserters = [MockRecordInserter(called)]
sda_file.insert('foo', True, 'insert_called', 0)
self.assertEqual(called, ['insert_called'])
def test_structures(self):
structure = {
'foo': 'foo',
'bar': np.arange(4),
'baz': np.array([True, False])
}
failures = (
TEST_NUMERIC + TEST_LOGICAL + TEST_CHARACTER + TEST_STRUCTURE +
TEST_STRUCTURE + TEST_SPARSE + TEST_SPARSE_COMPLEX
)
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
# Store homogeneous structures
label = 'test'
deflate = 0
objs = [structure] * 5
sda_file.insert(label, objs, label, deflate, as_structures=True)
# Check the type
with sda_file._h5file('r') as h5file:
record_type = get_record_type(h5file[label].attrs)
self.assertEqual(record_type, 'structures')
# Other record types should fail
for data in failures:
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
# Inhomogenous records should fail
data = [structure, structure.copy()]
data[0]['baz'] = 10 # change record type
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
del data[0]['baz']
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
# Cell of non-structures should fail
data = [True]
with self.assertRaises(ValueError):
sda_file.insert('bad', data, 'bad', 0, as_structures=True)
def test_from_file(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
contents = b'01'
with temporary_file() as source_file:
with open(source_file, 'wb') as f:
f.write(contents)
label = sda_file.insert_from_file(source_file)
sda_file.describe(label, label)
self.assertTrue(source_file.endswith(label))
def test_from_file_failure(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with temporary_file() as source_file:
pass
# The source file is gone
with self.assertRaises(ValueError):
sda_file.insert_from_file(source_file)
def test_unsupported(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
for i, obj in enumerate(TEST_UNSUPPORTED):
label = 'test' + str(i)
with self.assertRaises(ValueError):
sda_file.insert(label, obj, label, 0)
# Make sure the 'Updated' attr does not change
self.assertEqual(sda_file.Updated, 'Unmodified')
class TestSDAFileExtract(unittest.TestCase):
def test_invalid_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.extract('test/')
with self.assertRaises(ValueError):
sda_file.extract('test\\')
def test_label_not_exists(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.extract('test')
def test_no_timestamp_update(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', [0, 1, 2])
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.extract('test')
self.assertEqual(sda_file.Updated, 'Unmodified')
def test_round_trip(self):
test_set = (
TEST_NUMERIC + TEST_LOGICAL + TEST_CHARACTER + TEST_STRUCTURE
)
def assert_nested_equal(a, b):
# Unravel lists and tuples
if isinstance(a, (list, tuple)) or isinstance(b, (list, tuple)):
assert_equal(len(a), len(b))
for item_a, item_b in zip(a, b):
assert_nested_equal(item_a, item_b)
else:
return assert_equal(a, b)
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
for i, data in enumerate(test_set):
label = "test" + str(i)
sda_file.insert(label, data, '', i % 10)
extracted = sda_file.extract(label)
assert_equal(extracted, data)
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
for i, data in enumerate(TEST_CELL):
label = "test" + str(i)
sda_file.insert(label, data, '', i % 10)
extracted = sda_file.extract(label)
assert_nested_equal(extracted, data)
test_set = TEST_SPARSE + TEST_SPARSE_COMPLEX
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
for i, data in enumerate(test_set):
label = "test" + str(i)
sda_file.insert(label, data, '', i % 10)
extracted = sda_file.extract(label)
expected = data.tocoo()
self.assertEqual(extracted.dtype, expected.dtype)
assert_equal(extracted.row, expected.row)
assert_equal(extracted.col, expected.col)
assert_equal(extracted.data, expected.data)
def test_to_file(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
contents = b'Hello world'
sda_file.insert('test', io.BytesIO(contents))
with temporary_file() as destination_path:
with self.assertRaises(IOError):
sda_file.extract_to_file('test', destination_path)
sda_file.extract_to_file('test', destination_path, True)
with open(destination_path, 'rb') as f:
extracted = f.read()
self.assertEqual(extracted, contents)
# The file is closed and gone, try again
sda_file.extract_to_file('test', destination_path, True)
with open(destination_path, 'rb') as f:
extracted = f.read()
self.assertEqual(extracted, contents)
def test_to_file_non_file(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', 'not a file record')
with temporary_file() as destination_path:
with self.assertRaises(ValueError):
sda_file.extract_to_file('test', destination_path, True)
class TestSDAFileDescribe(unittest.TestCase):
def test_read_only(self):
with temporary_h5file() as h5file:
name = h5file.filename
set_encoded(h5file.attrs, **GOOD_ATTRS)
h5file.close()
sda_file = SDAFile(name, 'r')
with self.assertRaises(IOError):
sda_file.describe('test', 'a test')
def test_no_write(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.Writable = 'no'
with self.assertRaises(IOError):
sda_file.describe('test', 'a test')
def test_invalid_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.describe('test/', 'a test')
with self.assertRaises(ValueError):
sda_file.describe('test\\', 'a test')
def test_missing_label(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with self.assertRaises(ValueError):
sda_file.describe('test', 'a test')
def test_happy_path(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.insert('test', [1, 2, 3])
sda_file.describe('test', 'second')
with sda_file._h5file('r') as h5file:
attrs = get_decoded(h5file['test'].attrs, 'Description')
self.assertEqual(attrs['Description'], 'second')
# Make sure the 'Updated' attr gets updated
self.assertNotEqual(sda_file.Updated, 'Unmodified')
class TestSDAFileMisc(unittest.TestCase):
def test_labels(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('l0', [0])
sda_file.insert('l1', [1])
self.assertEqual(sorted(sda_file.labels()), ['l0', 'l1'])
def test_remove(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
labels = []
test_set = (
TEST_NUMERIC + TEST_LOGICAL + TEST_CHARACTER + TEST_CELL +
TEST_STRUCTURE + TEST_STRUCTURE + TEST_SPARSE +
TEST_SPARSE_COMPLEX
)
for i, obj in enumerate(test_set):
label = 'test' + str(i)
labels.append(label)
sda_file.insert(label, obj)
with self.assertRaises(ValueError):
sda_file.remove()
with self.assertRaises(ValueError):
sda_file.remove('not a label')
random.shuffle(labels)
removed = labels[::2]
kept = labels[1::2]
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
sda_file.remove(*removed)
self.assertEqual(sorted(sda_file.labels()), sorted(kept))
# Make sure metadata is preserved and data can be extracted
with sda_file._h5file('r') as h5file:
for label in kept:
attrs = h5file[label].attrs
self.assertIn('Deflate', attrs)
self.assertIn('Description', attrs)
self.assertIn('RecordType', attrs)
self.assertIn('Empty', attrs)
sda_file.extract(label)
sda_file.remove(*kept)
self.assertEqual(sda_file.labels(), [])
self.assertEqual(sda_file.FormatVersion, '1.1')
self.assertNotEqual(sda_file.Updated, 'Unmodified')
def test_probe(self):
cols = [
'RecordType', 'Description', 'Empty', 'Deflate', 'Complex',
'ArraySize', 'Sparse', 'RecordSize', 'Class', 'FieldNames',
'Command',
]
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
labels = []
for i, obj in enumerate(TEST_NUMERIC[:4]):
label = 'bar' + str(i)
labels.append(label)
sda_file.insert(label, obj, label, i)
for i, obj in enumerate(TEST_NUMERIC[4:6]):
label = 'foo' + str(i)
labels.append(label)
sda_file.insert(label, obj, label, i)
state = sda_file.probe()
state.sort_index()
self.assertEqual(len(state), 6)
assert_array_equal(state.columns, cols)
assert_array_equal(state.index, labels)
assert_array_equal(state['Description'], labels)
assert_array_equal(state['Deflate'], [0, 1, 2, 3, 0, 1])
state = sda_file.probe('bar.*')
state.sort_index()
self.assertEqual(len(state), 4)
assert_array_equal(state.columns, cols)
assert_array_equal(state.index, labels[:4])
assert_array_equal(state['Description'], labels[:4])
assert_array_equal(state['Deflate'], [0, 1, 2, 3])
state = sda_file.probe('foo.*')
state.sort_index()
self.assertEqual(len(state), 2)
assert_array_equal(state.columns, cols)
assert_array_equal(state.index, labels[4:])
assert_array_equal(state['Description'], labels[4:])
assert_array_equal(state['Deflate'], [0, 1])
class TestSDAFileReplaceUpdate(unittest.TestCase):
def test_replace(self):
with temporary_file() as file_path:
sda_file = SDAFile(file_path, 'w')
sda_file.insert('test', TEST_NUMERIC[0], 'test_description', 1)
replacements = TEST_NUMERIC[:1]
random.shuffle(replacements)
replacements = replacements[:10]
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
for new_data in replacements:
sda_file.replace('test', new_data)
assert_equal(sda_file.extract('test'), new_data)
with sda_file._h5file('r') as h5file:
attrs = get_decoded(
h5file['test'].attrs, 'Deflate', 'Description'
)
self.assertEqual(attrs['Description'], 'test_description')
self.assertEqual(attrs['Deflate'], 1)
self.assertNotEqual(sda_file.Updated, 'Unmodified')
def test_update_object_on_non_object(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example A1'
data = sda_file.extract('example I')
with self.assertRaises(ValueError):
sda_file.update_object(label, data)
def test_update_object_with_equivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
label = 'example I'
# Replace some stuff with the same type
data = sda_file.extract(label)
data['Parameter'] = np.arange(5)
sda_file.update_object(label, data)
extracted = sda_file.extract(label)
with sda_file._h5file('r') as h5file:
attrs = get_decoded(h5file['example I'].attrs)
self.assertNotEqual(sda_file.Updated, 'Unmodified')
# Validate equality
self.assertEqual(attrs['RecordType'], 'object')
self.assertEqual(attrs['Class'], 'ExampleObject')
self.assertIsInstance(extracted, dict)
self.assertEqual(len(extracted), 1)
assert_equal(extracted['Parameter'], data['Parameter'])
def test_update_object_with_inequivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example I'
# Replace some stuff with different type
data = sda_file.extract(label)
data['Parameter'] = 'hello world'
with self.assertRaises(ValueError):
sda_file.update_object(label, data)
def test_update_object_with_non_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example I'
# Replace some stuff with a non-dictionary
with self.assertRaises(ValueError):
sda_file.update_object(label, 'hello')
def test_update_objects_on_non_objects(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example A1'
data = sda_file.extract('example J')
with self.assertRaises(ValueError):
sda_file.update_objects(label, data)
def test_update_objects_with_equivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
with sda_file._h5file('a') as h5file:
set_encoded(h5file.attrs, Updated='Unmodified')
label = 'example J'
# Replace some stuff with the same type
data = sda_file.extract(label)
data[0, 0]['Parameter'] = np.arange(5)
sda_file.update_objects(label, data)
extracted = sda_file.extract(label)
with sda_file._h5file('r') as h5file:
attrs = get_decoded(h5file['example J'].attrs)
self.assertNotEqual(sda_file.Updated, 'Unmodified')
# Validate equality
self.assertEqual(attrs['RecordType'], 'objects')
self.assertEqual(attrs['Class'], 'ExampleObject')
self.assertIsInstance(extracted, np.ndarray)
self.assertEqual(extracted.shape, (2, 1))
assert_equal(extracted[0, 0]['Parameter'], data[0, 0]['Parameter'])
assert_equal(extracted[1, 0]['Parameter'], data[1, 0]['Parameter'])
def test_update_objects_with_inequivalent_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example J'
# Replace some stuff with different type
data = sda_file.extract(label)
data[0, 0]['Parameter'] = 'hello world'
with self.assertRaises(ValueError):
sda_file.update_objects(label, data)
def test_update_objects_with_non_record(self):
reference_path = data_path('SDAreference.sda')
with temporary_file() as file_path:
# Copy the reference, which has an object in it.
shutil.copy(reference_path, file_path)
sda_file = SDAFile(file_path, 'a')
label = 'example J'
# Replace some stuff with a non-dictionary
with self.assertRaises(ValueError):
sda_file.update_objects(label, 'hello')
|
bsd-3-clause
| -7,877,897,679,665,350,000
| 35.743869
| 79
| 0.559473
| false
| 3.834234
| true
| false
| false
|
sio2project/oioioi
|
oioioi/contestlogo/models.py
|
1
|
1997
|
import os.path
from django.db import models
from django.utils import timezone
from django.utils.text import get_valid_filename
from django.utils.translation import ugettext_lazy as _
from oioioi.contests.models import Contest
from oioioi.filetracker.fields import FileField
def make_logo_filename(instance, filename):
return 'logo/%s/%s' % (
instance.contest.id,
get_valid_filename(os.path.basename(filename)),
)
class ContestLogo(models.Model):
contest = models.OneToOneField(
Contest, verbose_name=_("contest"), primary_key=True, on_delete=models.CASCADE
)
image = FileField(upload_to=make_logo_filename, verbose_name=_("logo image"))
updated_at = models.DateTimeField(default=timezone.now)
link = models.URLField(
blank=True, null=True, verbose_name=_("external contest webpage url")
)
def save(self, *args, **kwargs):
self.updated_at = timezone.now()
return super(ContestLogo, self).save(*args, **kwargs)
@property
def filename(self):
return os.path.split(self.image.name)[1]
class Meta(object):
verbose_name = _("contest logo")
verbose_name_plural = _("contest logo")
def make_icon_filename(instance, filename):
return 'icons/%s/%s' % (
instance.contest.id,
get_valid_filename(os.path.basename(filename)),
)
class ContestIcon(models.Model):
contest = models.ForeignKey(
Contest, verbose_name=_("contest"), on_delete=models.CASCADE
)
image = FileField(upload_to=make_icon_filename, verbose_name=_("icon image"))
updated_at = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
self.updated_at = timezone.now()
return super(ContestIcon, self).save(*args, **kwargs)
@property
def filename(self):
return os.path.split(self.image.name)[1]
class Meta(object):
verbose_name = _("contest icon")
verbose_name_plural = _("contest icons")
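# --- Illustrative sketch (not part of the original oioioi module) ---
# Mirrors what make_logo_filename() produces for a hypothetical contest id and
# upload name, without needing a Contest instance; the values are assumptions.
def _logo_upload_path_sketch(contest_id='oi25', filename='my logo.png'):
    # os.path.basename() drops any directory part and get_valid_filename()
    # replaces unsafe characters, e.g. 'my logo.png' -> 'my_logo.png'.
    return 'logo/%s/%s' % (contest_id, get_valid_filename(os.path.basename(filename)))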
|
gpl-3.0
| -4,766,905,731,717,114,000
| 29.257576
| 86
| 0.667501
| false
| 3.630909
| true
| false
| false
|
wimac/home
|
Dropbox/skel/bin/sick-beard/sickbeard/metadata/tivo.py
|
1
|
13268
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# Author: Gordon Turner <gordonturner@gordonturner.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import sickbeard
#from sickbeard.common import *
from sickbeard import logger, exceptions, helpers
from sickbeard.metadata import generic
from sickbeard import encodingKludge as ek
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class TIVOMetadata(generic.GenericMetadata):
"""
Metadata generation class for TIVO
The following file structure is used:
show_root/Season 01/show - 1x01 - episode.avi.txt (* existing episode)
show_root/Season 01/.meta/show - 1x01 - episode.avi.txt (episode metadata)
This class only generates episode-specific metadata files; it does NOT generate a default.txt file.
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
poster=False,
fanart=False,
episode_thumbnails=False,
season_thumbnails=False):
generic.GenericMetadata.__init__(self,
show_metadata,
episode_metadata,
poster,
fanart,
episode_thumbnails,
season_thumbnails)
self._ep_nfo_extension = "txt"
self.generate_ep_metadata = True
self.name = 'TIVO'
self.eg_show_metadata = "<i>not supported</i>"
self.eg_episode_metadata = "Season##\\.meta\\<i>filename</i>.txt"
self.eg_fanart = "<i>not supported</i>"
self.eg_poster = "<i>not supported</i>"
self.eg_episode_thumbnails = "<i>not supported</i>"
self.eg_season_thumbnails = "<i>not supported</i>"
# Override with empty methods for unsupported features.
def create_show_metadata(self, show_obj):
pass
def create_fanart(self, show_obj):
pass
def get_episode_thumb_path(self, ep_obj):
pass
def get_season_thumb_path(self, show_obj, season):
pass
def retrieveShowMetadata(self, dir):
return (None, None)
# Override and implement features for Tivo.
def get_episode_file_path(self, ep_obj):
"""
Returns a full show dir/.meta/episode.txt path for Tivo
episode metadata files.
Note that pyTivo requires the metadata filename to include the original extension.
i.e. if the episode name is foo.avi, the metadata name is foo.avi.txt
ep_obj: a TVEpisode object to get the path for
"""
if ek.ek(os.path.isfile, ep_obj.location):
metadata_file_name = ek.ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension
metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), '.meta')
metadata_file_path = ek.ek(os.path.join, metadata_dir_name, metadata_file_name)
else:
logger.log(u"Episode location doesn't exist: "+str(ep_obj.location), logger.DEBUG)
return ''
return metadata_file_path
def _ep_data(self, ep_obj):
"""
Creates a key value structure for a Tivo episode metadata file and
returns the resulting data object.
ep_obj: a TVEpisode instance to create the metadata file for.
Lookup the show in http://thetvdb.com/ using the python library:
https://github.com/dbr/tvdb_api/
The results are saved in the object myShow.
The key values for the tivo metadata file are from:
http://pytivo.sourceforge.net/wiki/index.php/Metadata
"""
data = "";
eps_to_write = [ep_obj] + ep_obj.relatedEps
tvdb_lang = ep_obj.show.lang
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if tvdb_lang and not tvdb_lang == 'en':
ltvdb_api_parms['language'] = tvdb_lang
t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
myShow = t[ep_obj.show.tvdbid]
except tvdb_exceptions.tvdb_shownotfound, e:
raise exceptions.ShowNotFoundException(str(e))
except tvdb_exceptions.tvdb_error, e:
logger.log("Unable to connect to TVDB while creating meta files - skipping - "+str(e), logger.ERROR)
return False
for curEpToWrite in eps_to_write:
try:
myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
logger.log("Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? Should I delete from db?")
return None
if myEp["firstaired"] == None and ep_obj.season == 0:
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if myEp["episodename"] == None or myEp["firstaired"] == None:
return None
if myShow["seriesname"] != None:
# Title of the series (The Simpsons, Seinfeld, etc.) or title of the movie (The Mummy, Spiderman, etc).
data += ("title : " + myShow["seriesname"] + "\n")
# Name of series (The Simpsons, Seinfeld, etc.). This should be included if the show is episodic.
# For movies, you may repeat the name of the movie (The Mummy, Spiderman, etc), leave blank, or omit.
data += ("seriesTitle : " + myShow["seriesname"] + "\n")
# Title of the episode (Pilot, Homer's Night Out, Episode 02, etc.) Should be included for episodic shows.
# Leave blank or omit for movies.
data += ("episodeTitle : " + curEpToWrite.name + "\n")
# This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter
# the season number followed by the episode number for that season. For example, enter 201 for season 2
# episode 01.
# This only shows up if you go into the Details from the Program screen.
# This seems to disappear once the video is transferred to TiVo.
# NOTE: May not be correct format, missing season, but based on description from wiki leaving as is.
data += ("episodeNumber : " + str(curEpToWrite.episode) + "\n")
# Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses
# after the episode's title and before the description on the Program screen.
# FIXME: Hardcode isEpisode to true for now, not sure how to handle movies
data += ("isEpisode : true\n")
# Write the synopsis of the video here.
# Microsoft Word's smartquotes can die in a fire.
sanitizedDescription = curEpToWrite.description
# Replace double curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"")
# Replace single curly quotes
sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u02BC", "'")
data += ("description : " + sanitizedDescription + "\n")
# Usually starts with "SH" and followed by 6-8 digits.
# Tivo uses zap2it for their data, so the series id is the zap2it_id.
if myShow["zap2it_id"] != None:
data += ("seriesId : " + myShow["zap2it_id"] + "\n")
# This is the call sign of the channel the episode was recorded from.
if myShow["network"] != None:
data += ("callsign : " + myShow["network"] + "\n")
# This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also
# capitalized and never changes). This is the original air date of the episode.
# NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened.
if curEpToWrite.airdate != datetime.date.fromordinal(1):
data += ("originalAirDate : " + str(curEpToWrite.airdate) + "T00:00:00Z\n")
# This shows up at the beginning of the description on the Program screen and on the Details screen.
if myShow["actors"]:
for actor in myShow["actors"].split('|'):
if actor:
data += ("vActor : " + str(actor) + "\n")
# This is shown on both the Program screen and the Details screen. It uses a single digit to determine the
# number of stars: 1 for 1 star, 7 for 4 stars
if myShow["rating"] != None:
try:
rating = float(myShow['rating'])
except ValueError:
rating = 0.0
rating = rating / 10 * 4
data += ("starRating : " + str(rating) + "\n")
# This is shown on both the Program screen and the Details screen.
# It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR.
if myShow["contentrating"]:
data += ("tvRating : " + str(myShow["contentrating"]) + "\n")
# This field can be repeated as many times as necessary or omitted completely.
if ep_obj.show.genre:
for genre in ep_obj.show.genre.split('|'):
if genre:
data += ("vProgramGenre : " + str(genre) + "\n")
# NOTE: The following are metadata keywords are not used
# displayMajorNumber
# showingBits
# displayMinorNumber
# colorCode
# vSeriesGenre
# vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer
# partCount
# partIndex
return data
def write_ep_file(self, ep_obj):
"""
Generates and writes ep_obj's metadata under the given path with the
given filename root. Uses the episode's name with the extension in
_ep_nfo_extension.
ep_obj: TVEpisode object for which to create the metadata
file_name_path: The file name to use for this metadata. Note that the extension
will be automatically added based on _ep_nfo_extension. This should
include an absolute path.
"""
data = self._ep_data(ep_obj)
if not data:
return False
nfo_file_path = self.get_episode_file_path(ep_obj)
nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
try:
if not ek.ek(os.path.isdir, nfo_file_dir):
logger.log("Metadata dir didn't exist, creating it at "+nfo_file_dir, logger.DEBUG)
ek.ek(os.makedirs, nfo_file_dir)
helpers.chmodAsParent(nfo_file_dir)
logger.log(u"Writing episode nfo file to "+nfo_file_path)
nfo_file = ek.ek(open, nfo_file_path, 'w')
# Calling encode directly, b/c often descriptions have wonky characters.
nfo_file.write( data.encode( "utf-8" ) )
nfo_file.close()
helpers.chmodAsParent(nfo_file_path)
except IOError, e:
logger.log(u"Unable to write file to "+nfo_file_path+" - are you sure the folder is writable? "+str(e).decode('utf-8'), logger.ERROR)
return False
return True
# present a standard "interface"
metadata_class = TIVOMetadata
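# --- Illustrative sketch (not part of the original Sick Beard module) ---
# Shows the on-disk layout documented above for a hypothetical episode file:
# the metadata lives in a sibling '.meta' directory and keeps the original
# video extension before the added '.txt'.
def _tivo_meta_path_sketch(episode_location='/shows/Example/Season 01/example - 1x01 - pilot.avi'):
    # e.g. -> '/shows/Example/Season 01/.meta/example - 1x01 - pilot.avi.txt'
    name = os.path.basename(episode_location) + '.txt'
    return os.path.join(os.path.dirname(episode_location), '.meta', name)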
|
gpl-2.0
| 3,020,361,615,287,338,500
| 40.85489
| 176
| 0.562632
| false
| 4.210727
| false
| false
| false
|
jldbc/pybaseball
|
pybaseball/team_fielding.py
|
1
|
2821
|
import warnings
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
from . import cache
from .datahelpers import postprocessing
from .datasources.fangraphs import fg_team_fielding_data
# This is just a pass through for the new, more configurable function
team_fielding = fg_team_fielding_data
@cache.df_cache()
def team_fielding_bref(team, start_season, end_season=None):
"""
Get season-level Fielding Statistics for Specific Team (from Baseball-Reference)
ARGUMENTS:
team : str : The Team Abbreviation (i.e., 'NYY' for Yankees) of the Team you want data for
start_season : int : first season you want data for (or the only season if you do not specify an end_season)
end_season : int : final season you want data for
"""
if start_season is None:
raise ValueError(
"You need to provide at least one season to collect data for. " +
"Try team_fielding_bref(season) or team_fielding_bref(start_season, end_season)."
)
if end_season is None:
end_season = start_season
url = "https://www.baseball-reference.com/teams/{}".format(team)
data = []
headings = None
for season in range(start_season, end_season+1):
stats_url = "{}/{}-fielding.shtml".format(url, season)
response = requests.get(stats_url)
soup = BeautifulSoup(response.content, 'html.parser')
fielding_div = soup.find('div', {'id': 'all_standard_fielding'})
comment = fielding_div.find(
string=lambda text: isinstance(text, Comment))
fielding_hidden = BeautifulSoup(comment.extract(), 'html.parser')
table = fielding_hidden.find('table')
thead = table.find('thead')
if headings is None:
headings = [row.text.strip()
for row in thead.find_all('th')]
rows = table.find('tbody').find_all('tr')
for row in rows:
cols = row.find_all(['td', 'th'])
cols = [ele.text.strip() for ele in cols]
# Removes '*' and '#' from some names
cols = [col.replace('*', '').replace('#', '') for col in cols]
# Removes Team Totals and other rows
cols = [
col for col in cols if 'Team Runs' not in col
]
cols.insert(2, season)
data.append(cols)
headings.insert(2, "Year")
data = pd.DataFrame(data=data, columns=headings)
data = data.dropna() # Removes Row of All Nones
postprocessing.coalesce_nulls(data)
postprocessing.convert_percentages(data, ['CS%', 'lgCS%'])
postprocessing.convert_numeric(
data,
postprocessing.columns_except(
data,
['Team', 'Name', 'Pos\xa0Summary']
)
)
return data
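# --- Illustrative usage sketch (not part of the original pybaseball module) ---
# The team code and season range are arbitrary assumptions; calling this
# performs live HTTP requests against Baseball-Reference, so it is wrapped in
# a function rather than run at import time.
def _team_fielding_bref_usage_sketch():
    df = team_fielding_bref('NYY', 2018, 2019)  # two seasons of Yankees fielding
    return df[df['Year'] == 2019].head()  # 'Year' is the column inserted by the scraper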
|
mit
| 4,591,668,430,960,663,000
| 32.583333
| 115
| 0.609358
| false
| 3.82768
| false
| false
| false
|
agriffis/vcrpy-facebook
|
vcr_facebook/request.py
|
1
|
4164
|
from __future__ import absolute_import, unicode_literals, print_function
import hashlib
import logging
import re
import zlib
from .compat import OrderedDict, parse_qsl, quote
from .filters import (make_batch_relative_url_filter, make_multipart_filter, make_query_filter,
make_url_filter, make_elider_filter)
from .util import always_return
logger = logging.getLogger(__name__)
def wrap_before_record(wrapped, **kwargs):
before_record = make_before_record(**kwargs)
def wrapper(request):
request = before_record(request)
request = wrapped(request)
return request
return wrapper
def make_before_record(elide_appsecret_proof,
elide_access_token,
elide_client_secret,
elider_prefix):
appsecret_proof_filter = make_elider_filter(
'appsecret_proof',
elide_appsecret_proof and (
lambda q: elide_appsecret_proof(q['appsecret_proof'],
q['access_token'])),
elider_prefix,
)
access_token_filter = make_elider_filter(
'access_token',
elide_access_token and (
lambda q: elide_access_token(q['access_token'])),
elider_prefix,
)
input_token_filter = make_elider_filter(
'input_token',
elide_access_token and (
lambda q: elide_access_token(q['input_token'])),
elider_prefix,
)
client_secret_filter = make_elider_filter(
'client_secret',
elide_client_secret and (
lambda q: elide_client_secret(q['client_secret'])),
elider_prefix,
)
def _filter_body(body):
filters = [
make_multipart_filter(filter_uploads),
make_batch_relative_url_filter(appsecret_proof_filter),
make_batch_relative_url_filter(access_token_filter),
make_batch_relative_url_filter(input_token_filter),
make_batch_relative_url_filter(client_secret_filter),
make_query_filter(appsecret_proof_filter),
make_query_filter(access_token_filter),
make_query_filter(input_token_filter),
make_query_filter(client_secret_filter),
make_multipart_filter(appsecret_proof_filter),
make_multipart_filter(access_token_filter),
make_multipart_filter(input_token_filter),
make_multipart_filter(client_secret_filter),
]
for f in filters:
body = f(body)
return body
def _filter_headers(headers):
if 'content-length' in headers:
del headers['content-length']
return headers
def _filter_url(url):
filters = [
make_url_filter(appsecret_proof_filter),
make_url_filter(access_token_filter),
make_url_filter(client_secret_filter),
]
for f in filters:
url = f(url)
return url
def before_record(request):
if request.host != 'graph.facebook.com':
return request
request.body = _filter_body(request.body)
request.headers = _filter_headers(request.headers)
request.uri = _filter_url(request.uri)
request = filter_multipart_boundary(request)
return request
return before_record
def filter_uploads(parts):
for p in parts:
if b'; filename="' in p.header and len(p.content) > 100:
p.content = hashlib.md5(p.content).hexdigest()
return parts
MULTIPART_BOUNDARY = b'xxBOUNDARY' * 10
def filter_multipart_boundary(request):
content_type = request.headers.get('content-type', '')
prefix, equals, boundary = content_type.partition('=')
if boundary and prefix == 'multipart/form-data; boundary':
boundary = MULTIPART_BOUNDARY[:len(boundary)]
request.headers['content-type'] = b'{0}={1}'.format(prefix, boundary)
def filter(parts):
assert len(parts.boundary) == len(boundary)
parts.boundary = boundary
return parts
request.body = make_multipart_filter(filter)(request.body)
return request
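# --- Illustrative usage sketch (not part of the original module) ---
# Shows how wrap_before_record() combines an existing before_record hook with
# the Facebook-specific filters; the elider callables and prefix below are
# assumptions made for the example only.
def _example_before_record():
    def existing_hook(request):  # the hook being wrapped
        return request
    return wrap_before_record(
        existing_hook,
        elide_appsecret_proof=lambda proof, access_token: 'ELIDED-PROOF',
        elide_access_token=lambda access_token: 'ELIDED-TOKEN',
        elide_client_secret=lambda client_secret: 'ELIDED-SECRET',
        elider_prefix='XX',
    )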
|
mit
| -3,190,449,575,350,943,000
| 32.047619
| 95
| 0.60879
| false
| 3.984689
| false
| false
| false
|
HewlettPackard/oneview-ansible
|
test/test_hpe_icsp_server.py
|
1
|
10504
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import mock
import json
import pytest
import yaml
from oneview_module_loader import ICspHelper
from hpe_icsp_server import (ICspServerModule,
main as hpe_icsp_server_main)
from hpICsp.exceptions import HPICspInvalidResource
MODULE_NAME = 'hpe_icsp_server'
SERVER_IP = "16.124.135.239"
YAML_SERVER_PRESENT = """
state: present
api_version: 300
icsp_host: "16.124.133.245"
username: "Administrator"
password: "admin"
server_ipAddress: "16.124.135.239"
server_username: "Admin"
server_password: "serveradmin"
server_port: 443
"""
YAML_SERVER_ABSENT = """
state: absent
api_version: 300
icsp_host: "16.124.133.251"
username: "Administrator"
password: "admin"
server_ipAddress: "16.124.135.239"
"""
YAML_NETWORK_CONFIGURED = """
state: network_configured
api_version: 300
icsp_host: "16.124.133.245"
username: "Administrator"
password: "admin"
server_ipAddress: "16.124.135.239"
server_username: "Admin"
server_password: "serveradmin"
server_port: 443
server_personality_data:
network_config:
hostname: "test-web.io.fc.hpe.com"
domain: "demo.com"
interfaces:
- macAddress: "01:23:45:67:89:ab"
enabled: true
dhcpv4: false
ipv6Autoconfig:
dnsServers:
- "16.124.133.2"
staticNetworks:
- "16.124.133.39/255.255.255.0"
vlanid: -1
ipv4gateway: "16.124.133.1"
ipv6gateway:
virtualInterfaces:
"""
DEFAULT_SERVER = {"name": "SP-01", "uri": "/uri/239", "ilo": {"ipAddress": SERVER_IP}}
SERVER_ADDED = {"name": "SP-03", "uri": "/uri/188", "ilo": {"ipAddress": "16.124.135.188"}}
SERVERS = {
"members": [
DEFAULT_SERVER,
{"name": "SP-02", "uri": "/uri/233", "ilo": {"ipAddress": "16.124.135.233"}}
]
}
CONNECTION = {}
ICSP_JOBS = {}
JOB_RESOURCE = {"uri": "/rest/os-deployment-jobs/123456"}
class TestIcspServer():
@pytest.fixture(autouse=True)
def setUp(self):
self.patcher_ansible_module = mock.patch(MODULE_NAME + '.AnsibleModule')
self.mock_ansible_module = self.patcher_ansible_module.start()
self.mock_ansible_instance = mock.Mock()
self.mock_ansible_module.return_value = self.mock_ansible_instance
self.patcher_icsp_service = mock.patch(MODULE_NAME + '.hpICsp')
self.mock_icsp = self.patcher_icsp_service.start()
self.mock_connection = mock.Mock()
self.mock_connection.login.return_value = CONNECTION
self.mock_icsp.connection.return_value = self.mock_connection
self.mock_server_service = mock.Mock()
self.mock_icsp.servers.return_value = self.mock_server_service
yield
self.patcher_ansible_module.stop()
self.patcher_icsp_service.stop()
def test_should_not_add_server_when_already_present(self):
self.mock_connection.get.return_value = SERVERS
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_PRESENT)
ICspServerModule().run()
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=False,
msg=ICspServerModule.SERVER_ALREADY_PRESENT,
ansible_facts=dict(target_server=DEFAULT_SERVER)
)
def test_should_add_server(self):
self.mock_connection.get.side_effect = [{'members': []}, SERVERS]
self.mock_server_service.add_server.return_value = JOB_RESOURCE
self.mock_icsp.jobs.return_value = ICSP_JOBS
self.mock_icsp.common = mock.Mock()
self.mock_icsp.common.monitor_execution.return_value = {}
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_PRESENT)
hpe_icsp_server_main()
ilo_body = {'ipAddress': "16.124.135.239",
'username': "Admin",
'password': "serveradmin",
'port': 443}
self.mock_server_service.add_server.assert_called_once_with(ilo_body)
self.mock_icsp.common.monitor_execution.assert_called_once_with(JOB_RESOURCE, ICSP_JOBS)
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=True,
msg="Server created: '/uri/239'",
ansible_facts=dict(target_server=DEFAULT_SERVER)
)
def test_expect_exception_not_caught_when_create_server_raise_exception(self):
self.mock_connection.get.side_effect = [{'members': []}, SERVERS]
self.mock_server_service.add_server.side_effect = Exception("message")
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_PRESENT)
try:
ICspServerModule().run()
except Exception as e:
assert "message" == e.args[0]
else:
pytest.fail("Expected Exception was not raised")
def test_should_not_try_delete_server_when_it_is_already_absent(self):
self.mock_connection.get.return_value = {'members': []}
self.mock_server_service.delete_server.return_value = {}
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
ICspServerModule().run()
self.mock_server_service.delete_server.assert_not_called()
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=False,
msg=ICspServerModule.SERVER_ALREADY_ABSENT
)
def test_should_delete_server(self):
self.mock_connection.get.return_value = SERVERS
self.mock_server_service.delete_server.return_value = {}
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
ICspServerModule().run()
self.mock_server_service.delete_server.assert_called_once_with("/uri/239")
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=True,
msg="Server '/uri/239' removed successfully from ICsp."
)
def test_should_fail_with_all_exe_attr_when_HPICspException_raised_on_delete(self):
self.mock_connection.get.return_value = SERVERS
exeption_value = {"message": "Fake Message", "details": "Details", "errorCode": "INVALID_RESOURCE"}
self.mock_server_service.delete_server.side_effect = HPICspInvalidResource(exeption_value)
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
ICspServerModule().run()
# Load called args and convert to dict to prevent str comparison with different reordering (Python 3.5)
fail_json_args_msg = self.mock_ansible_instance.fail_json.call_args[1]['msg']
error_raised = json.loads(fail_json_args_msg)
assert error_raised == exeption_value
def test_should_fail_with_args_joined_when_common_exception_raised_on_delete(self):
self.mock_connection.get.return_value = SERVERS
self.mock_server_service.delete_server.side_effect = Exception("Fake Message", "INVALID_RESOURCE")
self.mock_ansible_instance.params = yaml.load(YAML_SERVER_ABSENT)
ICspServerModule().run()
self.mock_ansible_instance.fail_json.assert_called_once_with(msg='Fake Message; INVALID_RESOURCE')
def test_should_configure_network(self):
self.mock_connection.get.side_effect = [SERVERS, SERVERS]
self.mock_connection.post.return_value = JOB_RESOURCE
self.mock_server_service.get_server.return_value = DEFAULT_SERVER
self.mock_ansible_instance.params = yaml.load(YAML_NETWORK_CONFIGURED)
ICspServerModule().run()
network_config_state = yaml.load(YAML_NETWORK_CONFIGURED)
network_config = {
"serverData": [
{"serverUri": DEFAULT_SERVER['uri'], "personalityData": network_config_state['server_personality_data'],
"skipReboot": True}],
"failMode": None,
"osbpUris": []
}
uri = '/rest/os-deployment-jobs/?writeOnly=true'
self.mock_connection.post.assert_called_once_with(uri, network_config)
self.mock_ansible_instance.exit_json.assert_called_once_with(
changed=True,
msg=ICspServerModule.CUSTOM_ATTR_NETWORK_UPDATED,
ansible_facts=dict(target_server=DEFAULT_SERVER)
)
def test_should_fail_when_try_configure_network_without_inform_personality_data(self):
self.mock_connection.get.return_value = SERVERS
self.mock_server_service.get_server.return_value = DEFAULT_SERVER
params_config_network = yaml.load(YAML_NETWORK_CONFIGURED)
params_config_network['server_personality_data'] = {}
self.mock_ansible_instance.params = params_config_network
ICspServerModule().run()
self.mock_ansible_instance.fail_json.assert_called_once_with(msg=ICspServerModule.SERVER_PERSONALITY_DATA_REQUIRED)
def test_should_fail_when_try_configure_network_for_not_found_server(self):
self.mock_connection.get.return_value = {'members': []}
self.mock_ansible_instance.params = yaml.load(YAML_NETWORK_CONFIGURED)
ICspServerModule().run()
self.mock_ansible_instance.exit_json.assert_called_once_with(changed=False,
msg=ICspServerModule.SERVER_NOT_FOUND)
def test_expect_exception_not_caught_when_configure_network_raise_exception(self):
self.mock_connection.get.return_value = SERVERS
self.mock_connection.post.side_effect = Exception("message")
self.mock_ansible_instance.params = yaml.load(YAML_NETWORK_CONFIGURED)
try:
hpe_icsp_server_main()
except Exception as e:
assert "message" == e.args[0]
else:
pytest.fail("Expected Exception was not raised")
if __name__ == '__main__':
pytest.main([__file__])
|
apache-2.0
| -3,992,395,171,992,851,500
| 34.727891
| 123
| 0.647944
| false
| 3.581316
| true
| false
| false
|
PhonologicalCorpusTools/CorpusTools
|
corpustools/gui/ppgui.py
|
1
|
16303
|
import os
from collections import OrderedDict
from .imports import *
from corpustools.phonoprob.phonotactic_probability import (phonotactic_probability,
phonotactic_probability_all_words)
from corpustools.neighdens.io import load_words_neighden
from corpustools.corpus.classes import Attribute
from corpustools.exceptions import PCTError, PCTPythonError
from .windows import FunctionWorker, FunctionDialog
from .widgets import (RadioSelectWidget, FileWidget, TierWidget, RestrictedContextWidget)
from .corpusgui import AddWordDialog
from corpustools.contextmanagers import (CanonicalVariantContext,
MostFrequentVariantContext)
from corpustools import __version__
class PPWorker(FunctionWorker):
def run(self):
kwargs = self.kwargs
self.results = []
context = kwargs.pop('context')
if context == RestrictedContextWidget.canonical_value:
cm = CanonicalVariantContext
elif context == RestrictedContextWidget.frequent_value:
cm = MostFrequentVariantContext
corpus = kwargs['corpusModel'].corpus
st = kwargs['sequence_type']
tt = kwargs['type_token']
att = kwargs.get('attribute', None)
ft = kwargs['frequency_cutoff']
log_count = kwargs['log_count']
with cm(corpus, st, tt, attribute=att, frequency_threshold = ft, log_count=log_count) as c:
try:
if 'query' in kwargs:
for q in kwargs['query']:
res = phonotactic_probability(c, q,
algorithm = kwargs['algorithm'],
probability_type = kwargs['probability_type'],
stop_check = kwargs['stop_check'],
call_back = kwargs['call_back'])
if self.stopped:
break
self.results.append([q,res])
else:
end = kwargs['corpusModel'].beginAddColumn(att)
phonotactic_probability_all_words(c,
algorithm = kwargs['algorithm'],
probability_type = kwargs['probability_type'],
#num_cores = kwargs['num_cores'],
stop_check = kwargs['stop_check'],
call_back = kwargs['call_back'])
end = kwargs['corpusModel'].endAddColumn(end)
except PCTError as e:
self.errorEncountered.emit(e)
return
except Exception as e:
e = PCTPythonError(e)
self.errorEncountered.emit(e)
return
if self.stopped:
self.finishedCancelling.emit()
return
self.dataReady.emit(self.results)
class PPDialog(FunctionDialog):
header = ['Corpus',
'PCT ver.',
'Word',
'Analysis name',
'Algorithm',
'Probability type',
'Transcription tier',
'Frequency type',
'Log-scaled frequency',
'Pronunciation variants',
'Minimum word frequency',
'Result']
_about = [('This function calculates the phonotactic probability '
'of a word based on positional probabilities of single '
'segments and biphones derived from a corpus.'),
'',
'References: ',
('Vitevitch, Michael S. & Paul A. Luce. 2004.'
' A Web-based interface to calculate phonotactic'
' probability for words and nonwords in English.'
' Behavior Research Methods, Instruments, & Computers 36 (3), 481-487')
]
name = 'phonotactic probability'
def __init__(self, parent, settings, corpusModel, inventory, showToolTips):
FunctionDialog.__init__(self, parent, settings, PPWorker())
self.corpusModel = corpusModel
self.inventory = inventory
self.showToolTips = showToolTips
pplayout = QHBoxLayout()
algEnabled = {'Vitevitch && Luce':True}
self.algorithmWidget = RadioSelectWidget('Phonotactic probability algorithm',
OrderedDict([
('Vitevitch && Luce','vitevitch'),
]),
{'Vitevitch && Luce':self.vitevitchSelected,
},
algEnabled)
pplayout.addWidget(self.algorithmWidget)
queryFrame = QGroupBox('Query')
vbox = QFormLayout()
self.compType = None
self.oneWordRadio = QRadioButton('Calculate for one word')
self.oneWordRadio.clicked.connect(self.oneWordSelected)
self.oneWordRadio.setAutoExclusive(True)
self.oneWordEdit = QLineEdit()
self.oneWordEdit.textChanged.connect(self.oneWordRadio.click)
self.oneWordRadio.setChecked(True)
self.oneWordRadio.click()
self.oneNonwordRadio = QRadioButton('Calculate for a word/nonword not in the corpus')
self.oneNonwordRadio.clicked.connect(self.oneNonwordSelected)
self.oneNonwordRadio.setAutoExclusive(True)
self.oneNonwordLabel = QLabel('None created')
self.oneNonword = None
self.oneNonwordButton = QPushButton('Create word/nonword')
self.oneNonwordButton.clicked.connect(self.createNonword)
self.fileRadio = QRadioButton('Calculate for list of words')
self.fileRadio.clicked.connect(self.fileSelected)
self.fileRadio.setAutoExclusive(True)
self.fileWidget = FileWidget('Select a file', 'Text file (*.txt *.csv)')
self.fileWidget.textChanged.connect(self.fileRadio.click)
self.allwordsRadio = QRadioButton('Calculate for all words in the corpus')
self.allwordsRadio.clicked.connect(self.allwordsSelected)
self.allwordsRadio.setAutoExclusive(True)
self.columnEdit = QLineEdit()
self.columnEdit.setText('Phonotactic probability')
self.columnEdit.textChanged.connect(self.allwordsRadio.click)
vbox.addRow(self.oneWordRadio)
vbox.addRow(self.oneWordEdit)
vbox.addRow(self.oneNonwordRadio)
vbox.addRow(self.oneNonwordLabel,self.oneNonwordButton)
vbox.addRow(self.fileRadio)
vbox.addRow(self.fileWidget)
vbox.addRow(self.allwordsRadio)
vbox.addRow(QLabel('Column name:'),self.columnEdit)
note = QLabel(('(Selecting this option will add a new column containing the results to your corpus. '
'No results window will be displayed.)'))
note.setWordWrap(True)
vbox.addRow(note)
queryFrame.setLayout(vbox)
pplayout.addWidget(queryFrame)
optionFrame = QGroupBox('Options')
optionLayout = QVBoxLayout()
self.useLogScale = QCheckBox('Use log-scaled word frequencies (token count only)')
optionLayout.addWidget(self.useLogScale)
self.useLogScale.setChecked(True)
self.tierWidget = TierWidget(self.corpusModel.corpus,include_spelling=False)
optionLayout.addWidget(self.tierWidget)
self.typeTokenWidget = RadioSelectWidget('Type or token',
OrderedDict([('Count types','type'),
('Count tokens','token')]))
for widget in self.typeTokenWidget.widgets:
if 'token' in widget.text():
#we can only use log-scaling on token frequency
widget.clicked.connect(lambda x: self.useLogScale.setEnabled(True))
else:
                #if type frequency is selected, then disable the log-scale option
widget.clicked.connect(lambda y: self.useLogScale.setEnabled(False))
self.typeTokenWidget.widgets[1].click()
#normally we do self.typeTokenWidget.initialClick()
        #but here we default to token, not type, because that's in the original algorithm by V&L
actions = None
self.variantsWidget = RestrictedContextWidget(self.corpusModel.corpus, actions)
optionLayout.addWidget(self.variantsWidget)
optionLayout.addWidget(self.typeTokenWidget)
self.probabilityTypeWidget = RadioSelectWidget('Probability type',
OrderedDict([
('Biphone','bigram'),
('Single-phone','unigram')]))
optionLayout.addWidget(self.probabilityTypeWidget)
##----------------------
minFreqFrame = QGroupBox('Minimum frequency')
box = QFormLayout()
self.minFreqEdit = QLineEdit()
box.addRow('Minimum word frequency:',self.minFreqEdit)
minFreqFrame.setLayout(box)
optionLayout.addWidget(minFreqFrame)
##----------------------
optionFrame.setLayout(optionLayout)
pplayout.addWidget(optionFrame)
ppFrame = QFrame()
ppFrame.setLayout(pplayout)
self.layout().insertWidget(0,ppFrame)
self.algorithmWidget.initialClick()
self.algorithmWidget.initialClick()
if self.showToolTips:
self.tierWidget.setToolTip(("<FONT COLOR=black>"
                                        'Select whether to calculate phonotactic probability'
' on the spelling of a word (perhaps more useful for morphological purposes)'
' or any transcription tier of a word (perhaps more useful for phonological purposes),'
' in the corpus.'
"</FONT>"))
self.useLogScale.setToolTip(("<FONT COLOR=black>"
'If checked, then the token frequency count will be log-scaled. This option does not apply to type'
' frequency.'
"</FONT>"))
def createNonword(self):
dialog = AddWordDialog(self, self.corpusModel.corpus, self.inventory)
if dialog.exec_():
self.oneNonword = dialog.word
self.oneNonwordLabel.setText('{} ({})'.format(str(self.oneNonword),
str(self.oneNonword.transcription)))
self.oneNonwordRadio.click()
def oneWordSelected(self):
self.compType = 'one'
def oneNonwordSelected(self):
self.compType = 'nonword'
def fileSelected(self):
self.compType = 'file'
def allwordsSelected(self):
self.compType = 'all'
def generateKwargs(self):
##------------------
try:
frequency_cutoff = float(self.minFreqEdit.text())
except ValueError:
frequency_cutoff = 0.0
##-------------------
kwargs = {'corpusModel':self.corpusModel,
'algorithm': self.algorithmWidget.value(),
'context': self.variantsWidget.value(),
'sequence_type':self.tierWidget.value(),
'type_token':self.typeTokenWidget.value(),
'frequency_cutoff':frequency_cutoff,
'probability_type':self.probabilityTypeWidget.value(),
'log_count': self.useLogScale.isEnabled() and self.useLogScale.isChecked()}
if self.compType is None:
reply = QMessageBox.critical(self,
"Missing information", "Please specify a comparison type.")
return
elif self.compType == 'one':
text = self.oneWordEdit.text()
if not text:
reply = QMessageBox.critical(self,
"Missing information", "Please specify a word.")
return
try:
w = self.corpusModel.corpus.find(text)
except KeyError:
reply = QMessageBox.critical(self,
"Invalid information", "The spelling specified does match any words in the corpus.")
return
kwargs['query'] = [w]
elif self.compType == 'nonword':
if self.oneNonword is None:
reply = QMessageBox.critical(self,
"Missing information", "Please create a word/nonword.")
return
if not getattr(self.oneNonword,kwargs['sequence_type']):
reply = QMessageBox.critical(self,
"Missing information", "Please recreate the word/nonword with '{}' specified.".format(self.tierWidget.displayValue()))
return
kwargs['query'] = [self.oneNonword]
elif self.compType == 'file':
path = self.fileWidget.value()
if not path:
reply = QMessageBox.critical(self,
"Missing information", "Please enter a file path.")
return
if not os.path.exists(path):
reply = QMessageBox.critical(self,
"Invalid information", "The file path entered was not found.")
return
kwargs['query'] = list()
text = load_words_neighden(path)
for t in text:
if isinstance(t,str):
try:
w = self.corpusModel.corpus.find(t)
except KeyError:
reply = QMessageBox.critical(self,
"Invalid information", "The spelling '{}' was not found in the corpus.".format(t))
return
kwargs['query'].append(w)
elif self.compType == 'all':
column = self.columnEdit.text()
if column == '':
reply = QMessageBox.critical(self,
"Missing information", "Please enter a column name.")
return
colName = column.replace(' ','_')
attribute = Attribute(colName,'numeric',column)
if column in self.corpusModel.columns:
msgBox = QMessageBox(QMessageBox.Warning, "Duplicate columns",
"'{}' is already the name of a column. Overwrite?".format(column), QMessageBox.NoButton, self)
msgBox.addButton("Overwrite", QMessageBox.AcceptRole)
msgBox.addButton("Cancel", QMessageBox.RejectRole)
if msgBox.exec_() != QMessageBox.AcceptRole:
return
kwargs['attribute'] = attribute
return kwargs
def setResults(self, results):
self.results = []
try:
frequency_cutoff = float(self.minFreqEdit.text())
except ValueError:
frequency_cutoff = 0.0
for result in results:
w, pp = result
self.results.append({'Corpus': self.corpusModel.corpus.name,
'PCT ver.': __version__,#self.corpusModel.corpus._version,
'Analysis name': self.name.capitalize(),
'Word': str(w),
'Algorithm': self.algorithmWidget.displayValue().replace('&&','&'),
'Probability type': self.probabilityTypeWidget.displayValue(),
'Transcription tier': self.tierWidget.displayValue(),
'Frequency type': self.typeTokenWidget.value().title(),
'Log-scaled frequency': 'Yes' if self.useLogScale.isChecked() else 'No',
'Pronunciation variants': self.variantsWidget.value().title(),
'Minimum word frequency': frequency_cutoff,
'Result': pp})
def vitevitchSelected(self):
self.probabilityTypeWidget.enable()
self.typeTokenWidget.enable()
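# ---------------------------------------------------------------------------
# Simplified, self-contained illustration of the idea described in
# PPDialog._about (positional segment probabilities averaged over a word).
# This is NOT the PCT implementation -- the real calculation lives in
# corpustools.phonoprob.phonotactic_probability -- and the toy corpus shape,
# the function name and the uniform (type-based) weighting are assumptions
# made purely for illustration.
def _toy_positional_probability(word, corpus):
    from collections import Counter
    longest = max(len(w) for w in corpus)
    position_counts = [Counter() for _ in range(longest)]
    for w in corpus:
        for i, seg in enumerate(w):
            position_counts[i][seg] += 1
    probs = []
    for i, seg in enumerate(word):
        if i >= len(position_counts):
            probs.append(0.0)
            continue
        total = float(sum(position_counts[i].values()))
        probs.append(position_counts[i][seg] / total if total else 0.0)
    return sum(probs) / len(probs) if probs else 0.0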
|
bsd-3-clause
| 7,514,661,540,657,869,000
| 43.181572
| 142
| 0.5518
| false
| 4.809145
| false
| false
| false
|
avanwyk/cipy
|
cipy/algorithms/pso/functions.py
|
1
|
9569
|
# Copyright 2016 Andrich van Wyk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Collection of functions used to implement the PSO algorithm.
"""
import numpy as np
from cipy.algorithms.core import comparator
from cipy.algorithms.pso.types import Particle
def std_position(position, velocity):
"""
Standard particle position update according to the equation:
:math:`x_{ij}(t+1) = x_{ij}(t) + \
v_{ij}(t),\\;\\;\\forall\\; j \\in\\; \\{1,...,n\\}`
Args:
position (numpy.array): The current position.
velocity (numpy.array): The particle velocity.
Returns:
numpy.array: The calculated position.
"""
return position + velocity
def std_velocity(particle, social, state):
"""
Standard particle velocity update according to the equation:
:math:`v_{ij}(t+1) &= \\omega v_{ij}(t) + \
c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \
c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\
\\forall\\; j \\in\\; \\{1,...,n\\}`
If a v_max parameter is supplied (state.params['v_max'] is not None) the
returned velocity is clamped to v_max.
Args:
particle (cipy.algorithms.pso.types.Particle): Particle to update the
velocity for.
social (numpy.array): The social best for the
particle.
state (cipy.algorithms.pso.types.State): The PSO algorithm state.
Returns:
numpy.array: The calculated velocity, clamped to state.params['v_max'].
"""
inertia = state.params['inertia']
c_1, c_2 = state.params['c_1'], state.params['c_2']
v_max = state.params['v_max']
size = particle.position.size
c1r1 = __acceleration__(state.rng, c_1, size)
c2r2 = __acceleration__(state.rng, c_2, size)
velocity = __std_velocity_equation__(inertia, c1r1, c2r2, particle, social)
return __clamp__(velocity, v_max)
def __std_velocity_equation__(inertia, c1r1, c2r2, particle, social):
return (inertia * particle.velocity +
c1r1 * (particle.best_position - particle.position) +
c2r2 * (social - particle.position))
def __acceleration__(rng, coefficient, size):
return rng.uniform(0.0, coefficient, size)
def __clamp__(velocity, v_max):
return velocity if v_max is None else np.clip(velocity, -v_max, v_max)
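# Minimal usage sketch of the position/velocity updates above. The library's
# own driver passes a full State object; here types.SimpleNamespace stands in
# for it (only the attributes std_velocity reads: params and rng), and the
# inertia/acceleration coefficients are the commonly used constriction values,
# so treat this as an illustration rather than canonical cipy usage.
def _example_std_update():
    import types
    state = types.SimpleNamespace(
        params={'inertia': 0.729, 'c_1': 1.49618, 'c_2': 1.49618,
                'v_max': None},
        rng=np.random.RandomState(0))
    particle = Particle(position=np.zeros(3), velocity=np.zeros(3),
                        fitness=None, best_fitness=None,
                        best_position=np.ones(3))
    velocity = std_velocity(particle, social=np.full(3, 2.0), state=state)
    return std_position(particle.position, velocity)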
def gc_velocity_update(particle, social, state):
""" Guaranteed convergence velocity update.
Args:
particle: cipy.algorithms.pso.Particle: Particle to update the velocity
for.
social: cipy.algorithms.pso.Particle: The social best for the particle.
state: cipy.algorithms.pso.State: The state of the PSO algorithm.
Returns:
numpy.ndarray: the calculated velocity.
"""
gbest = state.swarm[gbest_idx(state.swarm)].position
if not np.array_equal(gbest, particle.position):
return std_velocity(particle, social, state)
rho = state.params['rho']
inertia = state.params['inertia']
v_max = state.params['v_max']
size = particle.position.size
r2 = state.rng.uniform(0.0, 1.0, size)
velocity = __gc_velocity_equation__(inertia, rho, r2, particle, gbest)
return __clamp__(velocity, v_max)
def __gc_velocity_equation__(inertia, rho, r2, particle, gbest):
return (-1 * particle.position + gbest + inertia *
particle.velocity + rho * (1 - 2 * r2))
def std_parameter_update(state, objective_function):
return state
def initialize_particle(rng, domain, fitness_function):
""" Initializes a particle within a domain.
Args:
rng: numpy.random.RandomState: The random number generator.
        domain: cipy.problems.core.Domain: The domain of the problem.
        fitness_function: callable: Function used to calculate the initial
            particle fitness.
Returns:
cipy.algorithms.pso.Particle: A new, fully initialized particle.
"""
position = rng.uniform(domain.lower, domain.upper, domain.dimension)
fitness = fitness_function(position)
return Particle(position=position,
velocity=np.zeros(domain.dimension),
fitness=fitness,
best_fitness=fitness,
best_position=position)
def update_fitness(objective_function, particle):
""" Calculates and updates the fitness and best_fitness of a particle.
Fitness is calculated using the 'problem.fitness' function.
Args:
        objective_function: The objective function used to calculate the
            particle's fitness.
particle: cipy.algorithms.pso.Particle: Particle to update the fitness
for.
Returns:
cipy.algorithms.pso.Particle: A new particle with the updated fitness.
"""
fitness = objective_function(particle.position)
best_fitness = particle.best_fitness
cmp = comparator(fitness)
if best_fitness is None or cmp(fitness, best_fitness):
best_position = particle.position
return particle._replace(fitness=fitness,
best_fitness=fitness,
best_position=best_position)
else:
return particle._replace(fitness=fitness)
def update_particle(position_update, velocity_update, state, nbest_topology,
idx_particle):
""" Update function for a particle.
Calculates and updates the velocity and position of a particle for a
single iteration of the PSO algorithm. Social best particle is determined
by the state.params['topology'] function.
Args:
state: cipy.algorithms.pso.State: The state of the PSO algorithm.
nbest_topology: dict: Containing neighbourhood best index for each
particle index.
idx_particle: tuple: Tuple of the index of the particle and the
particle itself.
Returns:
cipy.algorithms.pso.Particle: A new particle with the updated position
and velocity.
"""
(idx, particle) = idx_particle
nbest = state.swarm[nbest_topology[idx]].best_position
velocity = velocity_update(particle, nbest, state)
position = position_update(particle.position, velocity)
return particle._replace(position=position, velocity=velocity)
def gbest_topology(state):
gbest = gbest_idx(state.swarm)
return __topology__(state.swarm, lambda i: gbest)
def gbest_idx(swarm):
""" gbest Neighbourhood topology function.
Args:
swarm: list: The list of particles.
Returns:
int: The index of the gbest particle.
"""
best = 0
cmp = comparator(swarm[best].best_fitness)
for (idx, particle) in enumerate(swarm):
if cmp(particle.best_fitness, swarm[best].best_fitness):
best = idx
return best
def lbest_topology(state):
return __topology__(state.swarm, lambda i: lbest_idx(state, i))
def lbest_idx(state, idx):
""" lbest Neighbourhood topology function.
Neighbourhood size is determined by state.params['n_s'].
Args:
state: cipy.algorithms.pso.State: The state of the PSO algorithm.
idx: int: index of the particle in the swarm.
Returns:
int: The index of the lbest particle.
"""
swarm = state.swarm
n_s = state.params['n_s']
cmp = comparator(swarm[0].best_fitness)
indices = __lbest_indices__(len(swarm), n_s, idx)
best = None
for i in indices:
if best is None or cmp(swarm[i].best_fitness, swarm[best].best_fitness):
best = i
return best
def __lbest_indices__(size, n_s, idx):
start = idx - (n_s // 2)
idxs = []
for k in range(n_s):
idxs.append((start + k) % size)
return idxs
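# Quick check-style illustration of the ring neighbourhood above: with a swarm
# of 10 particles and n_s = 3, the neighbourhood wraps around the ends of the
# swarm list.
def _example_lbest_indices():
    assert __lbest_indices__(10, 3, 0) == [9, 0, 1]
    assert __lbest_indices__(10, 3, 9) == [8, 9, 0]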
def update_rho(state, objective_function):
params = state.params
rho = params['rho']
e_s = params['e_s']
e_f = params['e_f']
successes = params.get('successes', 0)
failures = params.get('failures', 0)
global_best = solution(state.swarm)
fitness = objective_function(global_best.position)
cmp = comparator(global_best.best_fitness)
if cmp(fitness, global_best.best_fitness):
successes += 1
failures = 0
else:
failures += 1
successes = 0
if successes > e_s:
rho *= 2
elif failures > e_f:
rho *= 0.5
else:
rho = rho
params['rho'] = rho
params['successes'] = successes
params['failures'] = failures
return state._replace(params=params)
def solution(swarm):
""" Determines the global best particle in the swarm.
Args:
swarm: iterable: an iterable that yields all particles in the swarm.
Returns:
cipy.algorithms.pso.Particle: The best particle in the swarm when
comparing the best_fitness values of the particles.
"""
best = swarm[0]
cmp = comparator(best.best_fitness)
for particle in swarm:
if cmp(particle.best_fitness, best.best_fitness):
best = particle
return best
def fitness_measurement(state):
swarm = state.swarm
return 'fitness', swarm[gbest_idx(swarm)].best_fitness
def __topology__(swarm, social_best):
return dict([(idx, social_best(idx)) for idx in range(len(swarm))])
|
apache-2.0
| 3,489,454,286,974,073,300
| 29.669872
| 80
| 0.64479
| false
| 3.71756
| false
| false
| false
|
SU-ECE-17-7/ibeis
|
_broken/preproc_featweight.py
|
1
|
13024
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# Python
from six.moves import zip, range, map # NOQA
# UTool
import utool as ut
import vtool as vt
#import vtool.image as vtimage
import numpy as np
from ibeis.algo.preproc import preproc_probchip
from os.path import exists
# Inject utool functions
(print, rrr, profile) = ut.inject2(__name__, '[preproc_featweight]')
def test_featweight_worker():
"""
test function
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show --cnn
"""
import ibeis
qreq_ = ibeis.main_helpers.testdata_qreq_(defaultdb='PZ_MTEST', p=['default:fw_detector=cnn'], qaid_override=[1])
ibs = qreq_.ibs
config2_ = qreq_.qparams
lazy = True
aid_list = qreq_.qaids
#aid_list = ibs.get_valid_aids()[0:30]
kpts_list = ibs.get_annot_kpts(aid_list)
chipsize_list = ibs.get_annot_chip_sizes(aid_list, config2_=config2_)
probchip_fpath_list = preproc_probchip.compute_and_write_probchip(ibs,
aid_list,
lazy=lazy,
config2_=config2_)
print('probchip_fpath_list = %r' % (probchip_fpath_list,))
probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None
for fpath in probchip_fpath_list]
_iter = list(zip(aid_list, kpts_list, probchip_list, chipsize_list))
_iter = ut.InteractiveIter(_iter, enabled=ut.get_argflag('--show'))
for aid, kpts, probchip, chipsize in _iter:
#kpts = kpts_list[0]
#aid = aid_list[0]
#probchip = probchip_list[0]
#chipsize = chipsize_list[0]
tup = (aid, kpts, probchip, chipsize)
(aid, weights) = gen_featweight_worker(tup)
if aid == 3 and ibs.get_dbname() == 'testdb1':
# Run Asserts if not interactive
weights_03_test = weights[0:3]
print('weights[0:3] = %r' % (weights_03_test,))
#weights_03_target = [ 0.098, 0.155, 0.422]
#weights_03_target = [ 0.324, 0.407, 0.688]
#weights_thresh = [ 0.09, 0.09, 0.09]
#ut.assert_almost_eq(weights_03_test, weights_03_target, weights_thresh)
ut.assert_inbounds(weights_03_test, 0, 1)
if not ut.show_was_requested():
break
if ut.show_was_requested():
import plottool as pt
#sfx, sfy = (probchip.shape[1] / chipsize[0], probchip.shape[0] / chipsize[1])
#kpts_ = vt.offset_kpts(kpts, (0, 0), (sfx, sfy))
pnum_ = pt.make_pnum_nextgen(1, 3) # *pt.get_square_row_cols(4))
fnum = 1
pt.figure(fnum=fnum, doclf=True)
###
pt.imshow(ibs.get_annot_chips(aid, config2_=config2_), pnum=pnum_(0), fnum=fnum)
if ut.get_argflag('--numlbl'):
pt.gca().set_xlabel('(1)')
###
pt.imshow(probchip, pnum=pnum_(2), fnum=fnum)
if ut.get_argflag('--numlbl'):
pt.gca().set_xlabel('(2)')
#pt.draw_kpts2(kpts_, ell_alpha=.4, color_list=pt.ORANGE)
###
#pt.imshow(probchip, pnum=pnum_(3), fnum=fnum)
#color_list = pt.draw_kpts2(kpts_, weights=weights, ell_alpha=.7, cmap_='jet')
#cb = pt.colorbar(weights, color_list)
#cb.set_label('featweights')
###
pt.imshow(ibs.get_annot_chips(aid, config2_=qreq_.qparams), pnum=pnum_(1), fnum=fnum)
#color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3, cmap_='jet')
color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3)
cb = pt.colorbar(weights, color_list)
cb.set_label('featweights')
if ut.get_argflag('--numlbl'):
pt.gca().set_xlabel('(3)')
#pt.draw_kpts2(kpts, ell_alpha=.4)
pt.draw()
pt.show_if_requested()
def gen_featweight_worker(tup):
"""
Function to be parallelized by multiprocessing / joblib / whatever.
Must take in one argument to be used by multiprocessing.map_async
Args:
        tup (tuple): (aid, kpts(ndarray), probchip(ndarray or None), chipsize)
            annotation id, keypoints, probability chip image and chip size
CommandLine:
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show --dpath figures --save ~/latex/crall-candidacy-2015/figures/gen_featweight.jpg
python -m ibeis.algo.preproc.preproc_featweight --test-gen_featweight_worker --show --db PZ_MTEST --qaid_list=1,2,3,4,5,6,7,8,9
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
>>> test_featweight_worker()
Ignore::
import plottool as pt
pt.imshow(probchip_list[0])
patch_list = [vt.patch.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0 for kp in kpts[0:1]]
patch_ = patch_list[0].copy()
patch = patch_
patch = patch_[-20:, :20, 0]
import vtool as vt
gaussian_patch = vt.gaussian_patch(patch.shape[1], patch.shape[0], shape=patch.shape[0:2], norm_01=False)
import cv2
sigma = 1/10
xkernel = (cv2.getGaussianKernel(patch.shape[1], sigma))
ykernel = (cv2.getGaussianKernel(patch.shape[0], sigma))
#ykernel = ykernel / ykernel.max()
#xkernel = ykernel / xkernel.max()
gaussian_kern2 = ykernel.dot(xkernel.T)
print(gaussian_kern2.sum())
patch2 = patch.copy()
patch2 = np.multiply(patch2, ykernel)
patch2 = np.multiply(patch2.T, xkernel).T
        if len(patch.shape) == 2:
patch3 = patch.copy() * gaussian_patch[:,:]
else:
patch3 = patch.copy() * gaussian_patch[:,:, None]
sum2 = patch2.sum() / (patch2.size)
sum3 = patch3.sum() / (patch3.size)
print(sum2)
print(sum3)
fig = pt.figure(fnum=1, pnum=(1, 3, 1), doclf=True, docla=True)
pt.imshow(patch * 255)
fig = pt.figure(fnum=1, pnum=(1, 3, 2))
pt.imshow(gaussian_kern2 * 255.0)
fig = pt.figure(fnum=1, pnum=(1, 3, 3))
pt.imshow(patch2 * 255.0)
pt.update()
"""
(aid, kpts, probchip, chipsize) = tup
if probchip is None:
# hack for undetected chips. SETS ALL FEATWEIGHTS TO .25 = 1/4
weights = np.full(len(kpts), .25, dtype=np.float32)
else:
sfx, sfy = (probchip.shape[1] / chipsize[0], probchip.shape[0] / chipsize[1])
kpts_ = vt.offset_kpts(kpts, (0, 0), (sfx, sfy))
#vt.patch.get_warped_patches()
patch_list = [vt.patch.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0
for kp in kpts_]
weight_list = [vt.patch.gaussian_average_patch(patch) for patch in patch_list]
#weight_list = [patch.sum() / (patch.size) for patch in patch_list]
weights = np.array(weight_list, dtype=np.float32)
return (aid, weights)
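# Self-contained sketch (not the vtool implementation used above) of the
# "Gaussian average of a probability patch" idea in gen_featweight_worker:
# the foreground-probability patch is weighted by a centred Gaussian window
# and the weighted mean becomes the feature weight. The window shape and the
# sigma fraction below are guesses, not vtool's actual parameters.
def toy_gaussian_average_patch(patch, sigma_frac=0.3):
    """ patch is assumed to be a 2D float array with values in [0, 1] """
    h, w = patch.shape[0:2]
    ys = np.arange(h) - (h - 1) / 2.0
    xs = np.arange(w) - (w - 1) / 2.0
    gauss_y = np.exp(-(ys ** 2) / (2.0 * (sigma_frac * h) ** 2))
    gauss_x = np.exp(-(xs ** 2) / (2.0 * (sigma_frac * w) ** 2))
    window = np.outer(gauss_y, gauss_x)
    window = window / window.sum()
    return float((patch * window).sum())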
def compute_fgweights(ibs, aid_list, config2_=None):
"""
Example:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()[1:2]
>>> config2_ = None
>>> featweight_list = compute_fgweights(ibs, aid_list)
>>> result = np.array_str(featweight_list[0][0:3], precision=3)
>>> print(result)
[ 0.125 0.061 0.053]
"""
nTasks = len(aid_list)
print('[preproc_featweight.compute_fgweights] Preparing to compute %d fgweights' % (nTasks,))
probchip_fpath_list = preproc_probchip.compute_and_write_probchip(ibs,
aid_list,
config2_=config2_)
chipsize_list = ibs.get_annot_chip_sizes(aid_list, config2_=config2_)
#if ut.DEBUG2:
# from PIL import Image
# probchip_size_list = [Image.open(fpath).size for fpath in probchip_fpath_list] # NOQA
# #with ut.embed_on_exception_context:
# # does not need to happen anymore
# assert chipsize_list == probchip_size_list, 'probably need to clear chip or probchip cache'
kpts_list = ibs.get_annot_kpts(aid_list, config2_=config2_)
# Force grayscale reading of chips
probchip_list = [vt.imread(fpath, grayscale=True) if exists(fpath) else None
for fpath in probchip_fpath_list]
print('[preproc_featweight.compute_fgweights] Computing %d fgweights' % (nTasks,))
arg_iter = zip(aid_list, kpts_list, probchip_list, chipsize_list)
featweight_gen = ut.generate(gen_featweight_worker, arg_iter,
nTasks=nTasks, ordered=True, freq=10)
featweight_param_list = list(featweight_gen)
#arg_iter = zip(aid_list, kpts_list, probchip_list)
#featweight_param_list1 = [gen_featweight_worker((aid, kpts, probchip)) for
#aid, kpts, probchip in arg_iter]
#featweight_aids = ut.get_list_column(featweight_param_list, 0)
featweight_list = ut.get_list_column(featweight_param_list, 1)
print('[preproc_featweight.compute_fgweights] Done computing %d fgweights' % (nTasks,))
return featweight_list
def generate_featweight_properties(ibs, feat_rowid_list, config2_=None):
"""
Args:
ibs (IBEISController):
fid_list (list):
Returns:
featweight_list
CommandLine:
python -m ibeis.algo.preproc.preproc_featweight --test-generate_featweight_properties
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> config2_ = ibs.new_query_params(dict(fg_on=True, fw_detector='rf'))
>>> aid_list = ibs.get_valid_aids()[1:2]
>>> fid_list = ibs.get_annot_feat_rowids(aid_list, ensure=True)
>>> #fid_list = ibs.get_valid_fids()[1:2]
>>> featweighttup_gen = generate_featweight_properties(ibs, fid_list, config2_=config2_)
>>> featweighttup_list = list(featweighttup_gen)
>>> featweight_list = featweighttup_list[0][0]
>>> featweight_test = featweight_list[0:3]
>>> featweight_target = [ 0.349, 0.218, 0.242]
>>> ut.assert_almost_eq(featweight_test, featweight_target, .3)
"""
# HACK: TODO AUTOGENERATE THIS
#cid_list = ibs.get_feat_cids(feat_rowid_list)
#aid_list = ibs.get_chip_aids(cid_list)
chip_rowid_list = ibs.dbcache.get(ibs.const.FEATURE_TABLE, ('chip_rowid',), feat_rowid_list)
aid_list = ibs.dbcache.get(ibs.const.CHIP_TABLE, ('annot_rowid',), chip_rowid_list)
featweight_list = compute_fgweights(ibs, aid_list, config2_=config2_)
return zip(featweight_list)
#def get_annot_probchip_fname_iter(ibs, aid_list):
# """ Returns probability chip path iterator
# Args:
# ibs (IBEISController):
# aid_list (list):
# Returns:
# probchip_fname_iter
# Example:
# >>> from ibeis.algo.preproc.preproc_featweight import * # NOQA
# >>> import ibeis
# >>> ibs = ibeis.opendb('testdb1')
# >>> aid_list = ibs.get_valid_aids()
# >>> probchip_fname_iter = get_annot_probchip_fname_iter(ibs, aid_list)
# >>> probchip_fname_list = list(probchip_fname_iter)
# """
# cfpath_list = ibs.get_annot_chip_fpath(aid_list, config2_=config2_)
# cfname_list = [splitext(basename(cfpath))[0] for cfpath in cfpath_list]
# suffix = ibs.cfg.detect_cfg.get_cfgstr()
# ext = '.png'
# probchip_fname_iter = (''.join([cfname, suffix, ext]) for cfname in cfname_list)
# return probchip_fname_iter
#def get_annot_probchip_fpath_list(ibs, aid_list):
# cachedir = get_probchip_cachedir(ibs)
# probchip_fname_list = get_annot_probchip_fname_iter(ibs, aid_list)
# probchip_fpath_list = [join(cachedir, fname) for fname in probchip_fname_list]
# return probchip_fpath_list
#class FeatWeightConfig(object):
# # TODO: Put this in a config
# def __init__(fw_cfg):
# fw_cfg.sqrt_area = 800
def on_delete(ibs, featweight_rowid_list, config2_=None):
# no external data to remove
return 0
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.algo.preproc.preproc_featweight
python -m ibeis.algo.preproc.preproc_featweight --allexamples
python -m ibeis.algo.preproc.preproc_featweight --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
apache-2.0
| 8,091,099,537,909,222,000
| 40.74359
| 170
| 0.592061
| false
| 3.115789
| true
| false
| false
|
zielmicha/satori
|
satori.events/satori/events/master.py
|
1
|
7084
|
# vim:ts=4:sts=4:sw=4:expandtab
"""Master (central) event coordinator.
"""
import collections
import select
from _multiprocessing import Connection
from multiprocessing.connection import Listener
from satori.objects import Argument
from .api import Manager
from .client import Client, Scheduler
from .mapper import Mapper
from .protocol import Command, KeepAlive, ProtocolError
class PollScheduler(Scheduler):
"""A Scheduler using select.poll on file descriptors.
"""
def __init__(self):
self.waits = select.poll()
self.fdmap = dict()
self.ready = collections.deque()
def next(self):
"""Return the next Client to handle.
A Client is available when its file descriptor is ready to be read from.
        Available Clients are scheduled in a round-robin fashion.
"""
while len(self.ready) == 0:
for fileno, event in self.waits.poll():
client = self.fdmap[fileno]
if event & (select.POLLERR | select.POLLHUP) != 0:
self.remove(client)
self.ready.append(client)
return self.ready.popleft()
def add(self, client):
"""Add a Client to this Scheduler.
"""
fileno = client.fileno
if fileno in self.fdmap:
return
#print 'PollScheduler: registered new client with fd', fileno
self.fdmap[fileno] = client
self.waits.register(fileno, select.POLLIN | select.POLLHUP | select.POLLERR)
def remove(self, client):
"""Remove a Client from this Scheduler.
"""
fileno = client.fileno
if fileno not in self.fdmap:
return
self.waits.unregister(fileno)
del self.fdmap[fileno]
class SelectScheduler(Scheduler):
"""A Scheduler using select.select on file descriptors.
"""
def __init__(self):
self.fdmap = dict()
self.ready = collections.deque()
def next(self):
"""Return the next Client to handle.
A Client is available when its file descriptor is ready to be read from.
        Available Clients are scheduled in a round-robin fashion.
"""
while len(self.ready) == 0:
for fileno in select.select(self.fdmap.keys(), [], [])[0]:
client = self.fdmap[fileno]
self.ready.append(client)
return self.ready.popleft()
def add(self, client):
"""Add a Client to this Scheduler.
"""
fileno = client.fileno
if fileno in self.fdmap:
return
self.fdmap[fileno] = client
def remove(self, client):
"""Remove a Client from this Scheduler.
"""
fileno = client.fileno
if fileno not in self.fdmap:
return
del self.fdmap[fileno]
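# Small test-style demonstration (not part of satori) of the Scheduler
# round-robin contract implemented above: a client becomes "next" once its
# file descriptor is readable. A bare object exposing a fileno attribute is
# enough for SelectScheduler itself (the real Client classes expose fileno as
# a property).
def _demo_select_scheduler():
    import os
    class _FakeClient(object):
        def __init__(self, fd):
            self.fileno = fd
    read_fd, write_fd = os.pipe()
    scheduler = SelectScheduler()
    scheduler.add(_FakeClient(read_fd))
    os.write(write_fd, b'x')
    client = scheduler.next()   # round-robin pick of the now-readable client
    scheduler.remove(client)
    os.close(read_fd)
    os.close(write_fd)
    return client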
class ConnectionClient(Client):
"""Out-of-process Client communicating over multiprocessing.Connection.
"""
@Argument('scheduler', type=Scheduler)
@Argument('connection', type=Connection)
def __init__(self, connection):
self.connection = connection
self.scheduler.add(self)
def sendResponse(self, response):
"""Send a response to this Client.
"""
self.connection.send(response)
def recvCommand(self):
"""Receive the next command from this Client.
"""
command = self.connection.recv()
if not isinstance(command, Command):
raise ProtocolError("received object is not a Command")
return command
def disconnect(self):
"""Disconnect this Client.
"""
self.scheduler.remove(self)
self.connection.close()
fileno = property(lambda self: self.connection.fileno())
class ListenerClient(Client):
"""In-process Client wrapping a multiprocessing.connection.Listener.
"""
@Argument('scheduler', type=Scheduler)
@Argument('listener', type=Listener)
def __init__(self, listener):
self.listener = listener
self.scheduler.add(self)
def sendResponse(self, response):
"""Send a response to this Client.
"""
pass
def recvCommand(self):
"""Receive the next command from this Client.
"""
try:
#print 'ListenerClient: waiting for connection'
connection = self.listener.accept()
#print 'ListenerClient: got connection'
except:
raise ProtocolError("Listener.accept() failed")
ConnectionClient(scheduler=self.scheduler, connection=connection)
return KeepAlive()
def disconnect(self):
"""Disconnect this Client.
"""
self.scheduler.remove(self)
self.listener.close()
# pylint: disable-msg=W0212
fileno = property(lambda self: self.listener._listener._socket.fileno())
# pylint: enable-msg=W0212
class Master(Manager):
"""The central Event Manager.
"""
@Argument('mapper', type=Mapper)
def __init__(self, mapper):
self.mapper = mapper
if hasattr(select, 'poll'):
self.scheduler = PollScheduler()
else:
self.scheduler = SelectScheduler()
self.serial = 0
def connectSlave(self, connection):
"""Attach a new Slave over the given connection.
"""
ConnectionClient(scheduler=self.scheduler, connection=connection)
def listen(self, listener):
"""Listen for new Slave connections using the given Listener.
"""
ListenerClient(scheduler=self.scheduler, listener=listener)
def _print(self, command, sender):
pass
#print 'event master: received', command, 'from', sender
def _handleKeepAlive(self, _command, sender):
self._print(_command, sender)
sender.sendResponse(None)
def _handleDisconnect(self, _command, sender):
self._print(_command, sender)
sender.disconnect()
def _handleAttach(self, command, sender):
self._print(command, sender)
self.dispatcher.attach(sender, command.queue_id)
sender.sendResponse(None)
def _handleDetach(self, command, sender):
self._print(command, sender)
self.dispatcher.detach(sender, command.queue_id)
sender.sendResponse(None)
def _handleMap(self, command, sender):
self._print(command, sender)
mapping_id = self.mapper.map(command.criteria, command.queue_id)
sender.sendResponse(mapping_id)
def _handleUnmap(self, command, sender):
self._print(command, sender)
self.mapper.unmap(command.mapping_id)
sender.sendResponse(None)
def _handleSend(self, command, sender):
self._print(command, sender)
event = command.event
event.serial = self.serial
self.serial += 1
sender.sendResponse(event.serial)
for queue_id in self.mapper.resolve(event):
self.dispatcher.enqueue(queue_id, event)
def _handleReceive(self, _command, sender):
self._print(_command, sender)
self.dispatcher.activate(sender)
|
mit
| -971,804,800,364,368,400
| 29.273504
| 84
| 0.619565
| false
| 4.351351
| false
| false
| false
|
zhlooking/LearnPython
|
Advaced_Features/slice.py
|
1
|
1281
|
L = ['Micheal', 'Hanson', 'William', 'Lucy', 'Frank']
# if you want to get the first three values in a list
# 1> The simplest way
def getFirstThreeValueOfList1(L):
subL1 = [L[0], L[1], L[2]]
return subL1
# 2> Use a loop
def getSubList(L = None, n = 3):
    if not isinstance(L, list) or not isinstance(n, (int, float)):
raise TypeError('bad operand type')
subL2 = []
for i in range(n):
        subL2.append(L[i])
return subL2
# 3> Use Slice feature
def getSubListBySlice(L, first = 0, last = -1):
    if not isinstance(L, list) or not all(isinstance(v, (int, float)) for v in (first, last)):
raise TypeError('bad operand type')
if last > 0 and last > first:
return L[first:last]
elif last < 0 and last + len(L) > first:
return L[first:last]
else:
raise TypeError('Argument value error')
#
# Test
print L
print getSubListBySlice(L, 0, 2)
print getSubListBySlice(L, 3)
print getSubListBySlice(L, -3)
print getSubListBySlice(L, 20, 30)
#### If there is a list and you want to take every n-th value from it
def getValuePerXValue(L, n):
return L[::n]
# Test
NumList = range(100)
print getValuePerXValue(NumList, 22)
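#### A few more slice patterns, added for illustration (same toy list L as above):
#### strings and tuples accept the identical syntax, and a negative step reverses.
print L[::-1]
print 'Hello world'[:5]
print tuple(range(10))[1:6:2]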
#### Iterator ####
from collections import Iterable
print isinstance('ABC', Iterable)
print isinstance([], Iterable)
print isinstance(123, Iterable)
|
mit
| 1,213,449,190,329,562,000
| 24.117647
| 80
| 0.68306
| false
| 2.821586
| false
| false
| false
|
ryfeus/lambda-packs
|
Spacy/source2.7/plac_tk.py
|
1
|
1888
|
from __future__ import print_function
import os
import sys
import Queue
import plac_core
from Tkinter import Tk
from ScrolledText import ScrolledText
from plac_ext import Monitor, TerminatedProcess
class TkMonitor(Monitor):
"""
An interface over a dictionary {taskno: scrolledtext widget}, with
methods add_listener, del_listener, notify_listener and start/stop.
"""
def __init__(self, name, queue=None):
Monitor.__init__(self, name, queue)
self.widgets = {}
@plac_core.annotations(taskno=('task number', 'positional', None, int))
def add_listener(self, taskno):
"There is a ScrolledText for each task"
st = ScrolledText(self.root, height=5)
st.insert('end', 'Output of task %d\n' % taskno)
st.pack()
self.widgets[taskno] = st
@plac_core.annotations(taskno=('task number', 'positional', None, int))
def del_listener(self, taskno):
del self.widgets[taskno]
@plac_core.annotations(taskno=('task number', 'positional', None, int))
def notify_listener(self, taskno, msg):
w = self.widgets[taskno]
w.insert('end', msg + '\n')
w.update()
def start(self):
'Start the mainloop'
self.root = Tk()
self.root.title(self.name)
self.root.wm_protocol("WM_DELETE_WINDOW", self.stop)
self.root.after(0, self.read_queue)
try:
self.root.mainloop()
except KeyboardInterrupt:
print('Process %d killed by CTRL-C' % os.getpid(), file=sys.stderr)
except TerminatedProcess:
pass
def stop(self):
self.root.quit()
def read_queue(self):
try:
cmd_args = self.queue.get_nowait()
except Queue.Empty:
pass
else:
getattr(self, cmd_args[0])(*cmd_args[1:])
self.root.after(100, self.read_queue)
|
mit
| 2,970,059,258,354,080,000
| 29.95082
| 79
| 0.610169
| false
| 3.609943
| false
| false
| false
|
tomchadwin/qgis2web
|
qgis2web/bridgestyle/sld/fromgeostyler.py
|
1
|
20761
|
import os
from xml.etree.ElementTree import Element, SubElement
from xml.etree import ElementTree
from xml.dom import minidom
from .transformations import processTransformation
import zipfile
_warnings = []
# return a dictionary<int,list of rules>, where int is the Z value
# symbolizers are marked with a Z
#
# a rule (with multiple symbolizers) will have the rule replicated, one for each Z value found in the symbolizer
#
# ie. rule[0]["symbolizers"][0] has Z=0
# rule[0]["symbolizers"][1] has Z=1
#
# this will return
# result[0] => rule with symbolizer 0 (name changed to include Z=0)
# result[1] => rule with symbolizer 1 (name changed to include Z=1)
def processRulesByZ(rules):
result = {}
for rule in rules:
for symbolizer in rule["symbolizers"]:
z = symbolizer.get("Z", 0)
if z not in result:
result[z] = []
r = result[z]
rule_copy = rule.copy()
rule_copy["symbolizers"] = [symbolizer]
rule_copy["name"] += ", Z=" + str(z)
r.append(rule_copy)
return result
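# Tiny illustration (not part of the module's public API) of the Z-splitting
# described in the comment block above: one rule holding two symbolizers at
# different Z levels comes back as two single-symbolizer rules keyed by Z.
def _example_process_rules_by_z():
    rules = [{"name": "roads",
              "symbolizers": [{"kind": "Line", "Z": 0},
                              {"kind": "Line", "Z": 1}]}]
    by_z = processRulesByZ(rules)
    # by_z == {0: [rule named "roads, Z=0"], 1: [rule named "roads, Z=1"]}
    return by_z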
def convert(geostyler):
global _warnings
_warnings = []
attribs = {
"version": "1.0.0",
"xsi:schemaLocation": "http://www.opengis.net/sld StyledLayerDescriptor.xsd",
"xmlns": "http://www.opengis.net/sld",
"xmlns:ogc": "http://www.opengis.net/ogc",
"xmlns:xlink": "http://www.w3.org/1999/xlink",
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
rulesByZ = processRulesByZ(geostyler["rules"])
root = Element("StyledLayerDescriptor", attrib=attribs)
namedLayer = SubElement(root, "NamedLayer")
layerName = SubElement(namedLayer, "Name")
layerName.text = geostyler["name"]
userStyle = SubElement(namedLayer, "UserStyle")
userStyleTitle = SubElement(userStyle, "Title")
userStyleTitle.text = geostyler["name"]
z_values = list(rulesByZ.keys())
z_values.sort()
for z_value in z_values:
zrules = rulesByZ[z_value]
featureTypeStyle = SubElement(userStyle, "FeatureTypeStyle")
if "transformation" in geostyler:
featureTypeStyle.append(processTransformation(geostyler["transformation"]))
for rule in zrules:
featureTypeStyle.append(processRule(rule))
if "blendMode" in geostyler:
_addVendorOption(featureTypeStyle, "composite", geostyler["blendMode"])
sldstring = ElementTree.tostring(root, encoding="utf8", method="xml").decode()
dom = minidom.parseString(sldstring)
result = dom.toprettyxml(indent=" "), _warnings
return result
def processRule(rule):
ruleElement = Element("Rule")
ruleName = SubElement(ruleElement, "Name")
ruleName.text = rule.get("name", "")
ruleFilter = rule.get("filter", None)
if ruleFilter == "ELSE":
filterElement = Element("ElseFilter")
ruleElement.append(filterElement)
else:
filt = convertExpression(ruleFilter)
if filt is not None:
filterElement = Element("ogc:Filter")
filterElement.append(filt)
ruleElement.append(filterElement)
if "scaleDenominator" in rule:
scale = rule["scaleDenominator"]
if "min" in scale:
minScale = SubElement(ruleElement, "MinScaleDenominator")
minScale.text = str(scale["min"])
if "max" in scale:
maxScale = SubElement(ruleElement, "MaxScaleDenominator")
maxScale.text = str(scale["max"])
symbolizers = _createSymbolizers(rule["symbolizers"])
ruleElement.extend(symbolizers)
return ruleElement
def _createSymbolizers(symbolizers):
sldSymbolizers = []
for sl in symbolizers:
symbolizer = _createSymbolizer(sl)
if symbolizer is not None:
if isinstance(symbolizer, list):
sldSymbolizers.extend(symbolizer)
else:
sldSymbolizers.append(symbolizer)
return sldSymbolizers
def _createSymbolizer(sl):
symbolizerType = sl["kind"]
if symbolizerType == "Icon":
symbolizer = _iconSymbolizer(sl)
if symbolizerType == "Line":
symbolizer = _lineSymbolizer(sl)
if symbolizerType == "Fill":
symbolizer = _fillSymbolizer(sl)
if symbolizerType == "Mark":
symbolizer = _markSymbolizer(sl)
if symbolizerType == "Text":
symbolizer = _textSymbolizer(sl)
if symbolizerType == "Raster":
symbolizer = _rasterSymbolizer(sl)
if not isinstance(symbolizer, list):
symbolizer = [symbolizer]
for s in symbolizer:
geom = _geometryFromSymbolizer(sl)
if geom is not None:
s.insert(0, geom)
return symbolizer
def _symbolProperty(sl, name, default=None):
if name in sl:
return _processProperty(sl[name])
else:
return default
def _processProperty(value):
v = convertExpression(value)
if isinstance(v, Element) and v.tag == "ogc:Literal":
v = v.text
return v
def _addValueToElement(element, value):
if value is not None:
if isinstance(value, Element):
element.append(value)
else:
element.text = str(value)
def _addCssParameter(parent, name, value):
if value is not None:
sub = SubElement(parent, "CssParameter", name=name)
_addValueToElement(sub, value)
return sub
def _addSubElement(parent, tag, value=None, attrib={}):
strAttrib = {k: str(v) for k, v in attrib.items()}
sub = SubElement(parent, tag, strAttrib)
_addValueToElement(sub, value)
return sub
def _addVendorOption(parent, name, value):
if value is not None:
sub = SubElement(parent, "VendorOption", name=name)
_addValueToElement(sub, value)
return sub
def _rasterSymbolizer(sl):
opacity = sl["opacity"]
root = Element("RasterSymbolizer")
_addSubElement(root, "Opacity", opacity)
channelSelectionElement = _addSubElement(root, "ChannelSelection")
for chanName in ["grayChannel", "redChannel", "greenChannel", "blueChannel"]:
if chanName in sl["channelSelection"]:
sldChanName = chanName[0].upper() + chanName[1:]
channel = _addSubElement(channelSelectionElement, sldChanName)
_addSubElement(
channel,
"SourceChannelName",
sl["channelSelection"][chanName]["sourceChannelName"],
)
if "colorMap" in sl:
colMap = sl["colorMap"]
colMapElement = _addSubElement(
root, "ColorMap", None, {"type": sl["colorMap"]["type"]}
)
for entry in colMap["colorMapEntries"]:
attribs = {
"color": entry["color"],
"quantity": entry["quantity"],
"label": entry["label"],
"opacity": entry["opacity"],
}
_addSubElement(colMapElement, "ColorMapEntry", None, attribs)
return root
def _textSymbolizer(sl):
color = _symbolProperty(sl, "color")
fontFamily = _symbolProperty(sl, "font")
label = _symbolProperty(sl, "label")
size = _symbolProperty(sl, "size")
root = Element("TextSymbolizer")
_addSubElement(root, "Label", label)
fontElem = _addSubElement(root, "Font")
_addCssParameter(fontElem, "font-family", fontFamily)
_addCssParameter(fontElem, "font-size", size)
if "offset" in sl:
placement = _addSubElement(root, "LabelPlacement")
pointPlacement = _addSubElement(placement, "PointPlacement")
if "anchor" in sl:
anchor = sl["anchor"]
# TODO: Use anchor
# centers
achorLoc = _addSubElement(pointPlacement, "AnchorPoint")
_addSubElement(achorLoc, "AnchorPointX", "0.5")
_addSubElement(achorLoc, "AnchorPointY", "0.5")
displacement = _addSubElement(pointPlacement, "Displacement")
offset = sl["offset"]
offsetx = _processProperty(offset[0])
offsety = _processProperty(offset[1])
_addSubElement(displacement, "DisplacementX", offsetx)
_addSubElement(displacement, "DisplacementY", offsety)
if "rotate" in sl:
rotation = _symbolProperty(sl, "rotate")
_addSubElement(displacement, "Rotation", rotation)
elif "perpendicularOffset" in sl and "background" not in sl:
placement = _addSubElement(root, "LabelPlacement")
linePlacement = _addSubElement(placement, "LinePlacement")
offset = sl["perpendicularOffset"]
dist = _processProperty(offset)
_addSubElement(linePlacement, "PerpendicularOffset", dist)
if "haloColor" in sl and "haloSize" in sl:
haloElem = _addSubElement(root, "Halo")
_addSubElement(haloElem, "Radius", sl["haloSize"])
haloFillElem = _addSubElement(haloElem, "Fill")
_addCssParameter(haloFillElem, "fill", sl["haloColor"])
_addCssParameter(haloFillElem, "fill-opacity", sl["haloOpacity"])
fillElem = _addSubElement(root, "Fill")
_addCssParameter(fillElem, "fill", color)
followLine = sl.get("followLine", False)
if followLine:
_addVendorOption(root, "followLine", True)
_addVendorOption(root, "group", "yes")
elif "background" not in sl:
_addVendorOption(root, "autoWrap", 50)
if "background" in sl:
background = sl["background"]
avg_size = max(background["sizeX"], background["sizeY"])
shapeName = "rectangle"
if background["shapeType"] == "circle" or background["shapeType"] == "elipse":
shapeName = "circle"
graphic = _addSubElement(root, "Graphic")
mark = _addSubElement(graphic, "Mark")
_addSubElement(graphic, "Opacity", background["opacity"])
_addSubElement(mark, "WellKnownName", shapeName)
fill = _addSubElement(mark, "Fill")
stroke = _addSubElement(mark, "Stroke")
_addCssParameter(stroke, "stroke", background["strokeColor"])
_addCssParameter(fill, "fill", background["fillColor"])
if background["sizeType"] == "buffer":
_addVendorOption(root, "graphic-resize", "stretch")
_addVendorOption(root, "graphic-margin", str(avg_size))
_addVendorOption(root, "spaceAround", str(25))
else:
_addSubElement(graphic, "Size", str(avg_size))
placement = _addSubElement(root, "LabelPlacement")
pointPlacement = _addSubElement(placement, "PointPlacement")
# centers
achorLoc = _addSubElement(pointPlacement, "AnchorPoint")
_addSubElement(achorLoc, "AnchorPointX", "0.5")
_addSubElement(achorLoc, "AnchorPointY", "0.5")
return root
def _lineSymbolizer(sl, graphicStrokeLayer=0):
opacity = _symbolProperty(sl, "opacity")
color = sl.get("color", None)
graphicStroke = sl.get("graphicStroke", None)
width = _symbolProperty(sl, "width")
dasharray = _symbolProperty(sl, "dasharray")
cap = _symbolProperty(sl, "cap")
join = _symbolProperty(sl, "join")
offset = _symbolProperty(sl, "perpendicularOffset")
root = Element("LineSymbolizer")
symbolizers = [root]
stroke = _addSubElement(root, "Stroke")
if graphicStroke is not None:
graphicStrokeElement = _addSubElement(stroke, "GraphicStroke")
graphic = _graphicFromSymbolizer(graphicStroke[graphicStrokeLayer])
graphicStrokeElement.append(graphic[0])
interval = sl.get("graphicStrokeInterval")
dashOffset = sl.get("graphicStrokeOffset")
size = graphicStroke[graphicStrokeLayer].get("size")
try:
fsize = float(size)
finterval = float(interval)
_addCssParameter(
stroke, "stroke-dasharray", "%s %s" % (str(fsize), str(finterval))
)
except (TypeError, ValueError):
# size or interval missing or non-numeric: fall back to a default dash pattern
_addCssParameter(stroke, "stroke-dasharray", "10 10")
_addCssParameter(stroke, "stroke-dashoffset", dashOffset)
if graphicStrokeLayer == 0 and len(graphicStroke) > 1:
for i in range(1, len(graphicStroke)):
symbolizers.extend(_lineSymbolizer(sl, i))
if color is not None:
_addCssParameter(stroke, "stroke", color)
_addCssParameter(stroke, "stroke-width", width)
_addCssParameter(stroke, "stroke-opacity", opacity)
_addCssParameter(stroke, "stroke-linejoin", join)
_addCssParameter(stroke, "stroke-linecap", cap)
if dasharray is not None:
if cap != "butt":
try:
EXTRA_GAP = 2 * width
tokens = [
int(v) + EXTRA_GAP if i % 2 else int(v)
for i, v in enumerate(dasharray.split(" "))
]
except (TypeError, ValueError):  # in case width is not a number, but an expression
GAP_FACTOR = 2
tokens = [
int(v) * GAP_FACTOR if i % 2 else int(v)
for i, v in enumerate(dasharray.split(" "))
]
dasharray = " ".join([str(v) for v in tokens])
_addCssParameter(stroke, "stroke-dasharray", dasharray)
if offset is not None:
_addSubElement(root, "PerpendicularOffset", offset)
return symbolizers
def _geometryFromSymbolizer(sl):
geomExpr = convertExpression(sl.get("Geometry", None))
if geomExpr is not None:
geomElement = Element("Geometry")
geomElement.append(geomExpr)
return geomElement
def _iconSymbolizer(sl):
path = sl["image"]
if path.lower().endswith("svg"):
return _svgMarkerSymbolizer(sl)
else:
return _rasterImageMarkerSymbolizer(sl)
def _svgMarkerSymbolizer(sl):
root, graphic = _basePointSimbolizer(sl)
svg = _svgGraphic(sl)
graphic.insert(0, svg)
return root
def _rasterImageMarkerSymbolizer(sl):
root, graphic = _basePointSimbolizer(sl)
img = _rasterImageGraphic(sl)
graphic.insert(0, img)
return root
def _markSymbolizer(sl):
root, graphic = _basePointSimbolizer(sl)
mark = _markGraphic(sl)
graphic.insert(0, mark)
return root
def _basePointSimbolizer(sl):
size = _symbolProperty(sl, "size")
rotation = _symbolProperty(sl, "rotate")
opacity = _symbolProperty(sl, "opacity")
offset = sl.get("offset", None)
root = Element("PointSymbolizer")
graphic = _addSubElement(root, "Graphic")
_addSubElement(graphic, "Opacity", opacity)
_addSubElement(graphic, "Size", size)
_addSubElement(graphic, "Rotation", rotation)
if offset:
displacement = _addSubElement(graphic, "Displacement")
_addSubElement(displacement, "DisplacementX", offset[0])
_addSubElement(displacement, "DisplacementY", offset[1])
return root, graphic
def _markGraphic(sl):
color = _symbolProperty(sl, "color")
outlineColor = _symbolProperty(sl, "strokeColor")
fillOpacity = _symbolProperty(sl, "fillOpacity", 1.0)
strokeOpacity = _symbolProperty(sl, "strokeOpacity", 1.0)
outlineWidth = _symbolProperty(sl, "strokeWidth")
outlineDasharray = _symbolProperty(sl, "strokeDasharray")
shape = _symbolProperty(sl, "wellKnownName")
mark = Element("Mark")
_addSubElement(mark, "WellKnownName", shape)
if fillOpacity:
fill = SubElement(mark, "Fill")
_addCssParameter(fill, "fill", color)
_addCssParameter(fill, "fill-opacity", fillOpacity)
stroke = _addSubElement(mark, "Stroke")
if strokeOpacity:
_addCssParameter(stroke, "stroke", outlineColor)
_addCssParameter(stroke, "stroke-width", outlineWidth)
_addCssParameter(stroke, "stroke-opacity", strokeOpacity)
if outlineDasharray is not None:
_addCssParameter(stroke, "stroke-dasharray", outlineDasharray)
return mark
def _svgGraphic(sl):
path = os.path.basename(sl["image"])
color = _symbolProperty(sl, "color")
outlineColor = _symbolProperty(sl, "strokeColor")
outlineWidth = _symbolProperty(sl, "strokeWidth")
mark = Element("Mark")
_addSubElement(mark, "WellKnownName", "file://%s" % path)
fill = _addSubElement(mark, "Fill")
_addCssParameter(fill, "fill", color)
stroke = _addSubElement(mark, "Stroke")
_addCssParameter(stroke, "stroke", outlineColor)
_addCssParameter(stroke, "stroke-width", outlineWidth)
return mark
def _rasterImageGraphic(sl):
path = os.path.basename(sl["image"])
externalGraphic = Element("ExternalGraphic")
attrib = {"xlink:type": "simple", "xlink:href": path}
SubElement(externalGraphic, "OnlineResource", attrib=attrib)
_addSubElement(
externalGraphic, "Format", "image/%s" % os.path.splitext(path)[1][1:]
)
return externalGraphic
def _baseFillSymbolizer(sl):
root = Element("PolygonSymbolizer")
return root
def _graphicFromSymbolizer(sl):
symbolizers = _createSymbolizer(sl)
graphics = []
for s in symbolizers:
graphics.extend([graph for graph in s.iter("Graphic")])
return graphics
def _fillSymbolizer(sl, graphicFillLayer=0):
root = _baseFillSymbolizer(sl)
symbolizers = [root]
opacity = float(_symbolProperty(sl, "opacity", 1))
color = sl.get("color", None)
graphicFill = sl.get("graphicFill", None)
offset = sl.get("offset", None)
if graphicFill is not None:
margin = _symbolProperty(sl, "graphicFillMarginX")
fill = _addSubElement(root, "Fill")
graphicFillElement = _addSubElement(fill, "GraphicFill")
graphic = _graphicFromSymbolizer(graphicFill[graphicFillLayer])
graphicFillElement.append(graphic[0])
_addVendorOption(root, "graphic-margin", margin)
if graphicFillLayer == 0 and len(graphicFill) > 1:
for i in range(1, len(graphicFill)):
symbolizers.extend(_fillSymbolizer(sl, i))
if color is not None:
fillOpacity = float(_symbolProperty(sl, "fillOpacity", 1))
fill = _addSubElement(root, "Fill")
_addCssParameter(fill, "fill", color)
_addCssParameter(fill, "fill-opacity", fillOpacity * opacity)
outlineColor = _symbolProperty(sl, "outlineColor")
if outlineColor is not None:
outlineDasharray = _symbolProperty(sl, "outlineDasharray")
outlineWidth = _symbolProperty(sl, "outlineWidth")
outlineOpacity = float(_symbolProperty(sl, "outlineOpacity"))
# borderWidthUnits = props["outline_width_unit"]
stroke = _addSubElement(root, "Stroke")
_addCssParameter(stroke, "stroke", outlineColor)
_addCssParameter(stroke, "stroke-width", outlineWidth)
_addCssParameter(stroke, "stroke-opacity", outlineOpacity * opacity)
# _addCssParameter(stroke, "stroke-linejoin", join)
# _addCssParameter(stroke, "stroke-linecap", cap)
if outlineDasharray is not None:
_addCssParameter(
stroke, "stroke-dasharray", " ".join(str(v) for v in outlineDasharray)
)
if offset:
pass # TODO: Not sure how to add this in SLD
return symbolizers
#######################
operators = [
"PropertyName",
"Or",
"And",
"PropertyIsEqualTo",
"PropertyIsNotEqualTo",
"PropertyIsLessThanOrEqualTo",
"PropertyIsGreaterThanOrEqualTo",
"PropertyIsLessThan",
"PropertyIsGreaterThan",
"PropertyIsLike",
"Add",
"Sub",
"Mul",
"Div",
"Not",
]
operatorToFunction = {
"PropertyIsEqualTo": "equalTo",
"PropertyIsNotEqualTo": "notEqual",
"PropertyIsLessThanOrEqualTo": "lessEqualThan",
"PropertyIsGreaterThanOrEqualTo": "greaterEqualThan",
"PropertyIsLessThan": "lessThan",
"PropertyIsGreaterThan": "greaterThan",
}
def convertExpression(exp, inFunction=False):
if exp is None:
return None
elif isinstance(exp, list):
if exp[0] in operators and not (inFunction and exp[0] in operatorToFunction):
return handleOperator(exp)
else:
return handleFunction(exp)
else:
return handleLiteral(exp)
def handleOperator(exp):
name = exp[0]
elem = Element("ogc:" + name)
if name == "PropertyIsLike":
elem.attrib["wildCard"] = "%"
if name == "PropertyName":
elem.text = exp[1]
else:
for operand in exp[1:]:
elem.append(convertExpression(operand))
return elem
def handleFunction(exp):
name = operatorToFunction.get(exp[0], exp[0])
elem = Element("ogc:Function", name=name)
if len(exp) > 1:
for arg in exp[1:]:
elem.append(convertExpression(arg, True))
return elem
def handleLiteral(v):
elem = Element("ogc:Literal")
elem.text = str(v)
return elem
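# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It only calls convertExpression() defined above, so it runs only
# when the module is executed directly. The concrete Element implementation
# (xml.etree vs. lxml) is whatever the file imports at the top.
if __name__ == "__main__":
    expr = ["PropertyIsEqualTo", ["PropertyName", "type"], "residential"]
    ogc = convertExpression(expr)
    # Expected structure:
    #   ogc:PropertyIsEqualTo
    #       ogc:PropertyName -> "type"
    #       ogc:Literal      -> "residential"
    print(ogc.tag)
    for child in ogc:
        print(" ", child.tag, child.text)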
|
gpl-2.0
| 1,951,233,122,359,109,400
| 33.601667
| 111
| 0.630943
| false
| 3.677768
| false
| false
| false
|
TommesDee/cpachecker
|
scripts/benchmark/tools/wolverine.py
|
2
|
1148
|
import subprocess
import benchmark.util as Util
import benchmark.tools.template
import benchmark.result as result
class Tool(benchmark.tools.template.BaseTool):
def getExecutable(self):
return Util.findExecutable('wolverine')
def getVersion(self, executable):
return subprocess.Popen([executable, '--version'],
stdout=subprocess.PIPE).communicate()[0].split()[1].strip()
def getName(self):
return 'Wolverine'
def getStatus(self, returncode, returnsignal, output, isTimeout):
if "VERIFICATION SUCCESSFUL" in output:
assert returncode == 0
status = result.STR_TRUE
elif "VERIFICATION FAILED" in output:
assert returncode == 10
status = result.STR_FALSE_LABEL
elif returnsignal == 9:
status = "TIMEOUT"
elif returnsignal == 6 or (returncode == 6 and "Out of memory" in output):
status = "OUT OF MEMORY"
elif returncode == 6 and "PARSING ERROR" in output:
status = "PARSING ERROR"
else:
status = "FAILURE"
return status
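# --- Illustrative sketch (added; not part of the original file): how
# getStatus() maps Wolverine's output and exit codes to benchmark results.
# Instantiation details depend on benchmark.tools.template.BaseTool, so the
# calls below are shown as comments only.
#   tool.getStatus(0, 0, "... VERIFICATION SUCCESSFUL ...", False)  -> result.STR_TRUE
#   tool.getStatus(10, 0, "... VERIFICATION FAILED ...", False)     -> result.STR_FALSE_LABEL
#   tool.getStatus(1, 9, "", True)                                  -> "TIMEOUT"
#   tool.getStatus(6, 0, "PARSING ERROR ...", False)                -> "PARSING ERROR"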
|
apache-2.0
| 3,404,873,571,651,703,300
| 30.054054
| 91
| 0.614111
| false
| 4.555556
| false
| false
| false
|
zfrenchee/pandas
|
pandas/core/frame.py
|
1
|
233731
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.common import (_try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat,
standardize_mapping)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_ensure_index_from_sequences)
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.core.algorithms as algorithms
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core import accessor
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.format as fmt
import pandas.io.formats.console as console
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : string, default None
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2
a b c d e
0 2 8 8 3 4
1 4 2 9 0 9
2 1 0 7 8 0
3 5 1 7 1 3
4 6 0 2 4 2
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv'])
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array((len(index), len(columns)),
data, dtype=dtype)
mgr = self._init_ndarray(values, index, columns,
dtype=values.dtype, copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = {k: v for k, v in compat.iteritems(data) if k in columns}
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(np.nan)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case of a non-interactive session, no
boundaries apply.
ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if max_rows is not None:  # rows are limited by display.max_rows
# render at most max_rows rows for the width check
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split('\n'))
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
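# Added doctest-style example (not in the original source): minimal
# illustration of iterating (column name, Series) pairs.
# >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
# >>> for label, content in df.iteritems():
# ...     print(label, list(content))
# col1 [1, 2]
# col2 [3, 4]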
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
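# Added doctest-style example (not in the original source): DataFrame.dot
# with an aligned Series; repr spacing may vary slightly across versions.
# >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# >>> s = pd.Series([10, 20], index=['a', 'b'])
# >>> df.dot(s)
# 0     50
# 1    110
# dtype: int64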
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
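# Added doctest-style example (not in the original source). The
# orient='index' case assumes dict insertion order is preserved
# (Python 3.6+); key order may differ on older interpreters.
# >>> pd.DataFrame.from_dict({'col1': [1, 2], 'col2': [3, 4]})
#    col1  col2
# 0     1     3
# 1     2     4
# >>> pd.DataFrame.from_dict({'row1': [1, 3], 'row2': [2, 4]},
# ...                        orient='index')
#       0  1
# row1  1  3
# row2  2  4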
def to_dict(self, orient='dict', into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
>>> df = pd.DataFrame(
{'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
>>> df
col1 col2
a 1 0.50
b 2 0.75
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
return into_c((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [into_c((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row)))
for row in self.values]
elif orient.lower().startswith('i'):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (e.g. a Jupyter/IPython notebook on a remote host)
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
index_data = [arrays[i] for i in to_remove]
result_index = _ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
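# Added doctest-style example (not in the original source): building a
# frame from a list of tuples with explicit column names.
# >>> recs = [(1, 'a'), (2, 'b')]
# >>> pd.DataFrame.from_records(recs, columns=['num', 'letter'])
#    num letter
# 0    1      a
# 1    2      b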
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
"""
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
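# Added doctest-style example (not in the original source); the exact
# dtype strings in the repr depend on platform and numpy version.
# >>> df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 0.75]})
# >>> df.to_records()
# rec.array([(0, 1, 0.5), (1, 2, 0.75)],
#           dtype=[('index', '<i8'), ('a', '<i8'), ('b', '<f8')])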
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
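# Added doctest-style example (not in the original source): both
# supported orientations of from_items.
# >>> pd.DataFrame.from_items([('a', [1, 2]), ('b', [3, 4])])
#    a  b
# 0  1  3
# 1  2  4
# >>> pd.DataFrame.from_items([('row1', [1, 3]), ('row2', [2, 4])],
# ...                         columns=['a', 'b'], orient='index')
#       a  b
# row1  1  3
# row2  2  4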
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""
Read CSV file (DEPRECATED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
Write multi_index columns as a list of tuples (if True),
or in the new, expanded format (if False).
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse.frame import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
.. deprecated:: 0.20.0
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
.. deprecated:: 0.21.0
This argument will be removed and will always write each row
of the multi-index as a separate row in the CSV file.
Write MultiIndex columns as a list of tuples (if True) or in
the new, expanded format, where each MultiIndex column is a row
in the CSV (if False).
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
if tupleize_cols is not None:
warnings.warn("The 'tupleize_cols' parameter is deprecated and "
"will be removed in a future version",
FutureWarning, stacklevel=2)
else:
tupleize_cols = False
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
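# Added usage example (not in the original source): with path_or_buf=None
# the CSV is returned as a string.
# >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
# >>> print(df.to_csv(index=False))
# a,b
# 1,3
# 2,4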
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". Default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
def to_feather(self, fname):
"""
write out the binary feather-format for DataFrames
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
def to_parquet(self, fname, engine='auto', compression='snappy',
**kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
Parameters
----------
fname : str
string file path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
'io.parquet.engine' is used; the first of the supported
libraries found to be installed is picked.
compression : str, optional, default 'snappy'
compression method, includes {'gzip', 'snappy', 'brotli'}
kwargs
Additional keyword arguments passed to the engine
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, **kwargs)
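# Illustrative usage sketch (assumes ``pyarrow`` or ``fastparquet`` is
# installed; the path and options are illustrative):
# >>> df.to_parquet('frame.parquet', engine='auto', compression='snappy')
# >>> pd.read_parquet('frame.parquet')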
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names')
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
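# Illustrative usage sketch: with ``buf=None`` the rendered table is returned
# as a plain string, which is convenient for logging. ``float_format`` takes a
# one-argument callable applied to each float.
# >>> df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.5]})
# >>> print(df.to_string(index=False, float_format='{:.1f}'.format))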
@Substitution(header='whether to print column labels, default True')
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
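# Illustrative usage sketch: as with ``to_string``, passing ``buf=None``
# returns the markup so it can be embedded in a report (class names below are
# hypothetical):
# >>> html = df.to_html(classes='table table-striped', border=0, escape=True)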
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
to True, with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
"""
from pandas.io.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
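# Illustrative usage sketch: ``info`` writes to ``sys.stdout`` by default;
# pass a writable buffer to capture the summary instead.
# >>> import io
# >>> buf = io.StringIO()
# >>> df.info(buf=buf, memory_usage='deep')
# >>> summary = buf.getvalue()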
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in the returned Series. If `index=True` (the default),
the first entry of the Series is `Index`.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
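# Illustrative usage sketch: ``deep=True`` interrogates object columns for
# their actual string payload, so the two totals can differ substantially.
# >>> df = pd.DataFrame({'key': ['a' * 100] * 3, 'val': [1, 2, 3]})
# >>> df.memory_usage(index=True, deep=False).sum()  # shallow estimate
# >>> df.memory_usage(index=True, deep=True).sum()   # includes string bytes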
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced._from_array(
values, index=self.index, name=label, fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0, convert=False)
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
return self._take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame(np.random.randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
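# Illustrative usage sketch: local variables are referenced in the expression
# with the ``@`` prefix described in the docstring above (values are made up):
# >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})
# >>> threshold = 1
# >>> df.query('a > b and a > @threshold')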
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame(np.random.randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use np.datetime64, 'datetime' or 'datetime64'
* To select timedeltas, use np.timedelta64, 'timedelta' or
'timedelta64'
* To select Pandas categorical dtypes, use 'category'
* To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0),
or a 'datetime64[ns, tz]' string
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1
1 2
2 1
3 2
4 1
5 2
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced._from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self'
)
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
'Must pass DataFrame or 2-d ndarray with boolean values only'
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns)
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
For python 3.6 and above, the columns are inserted in the order of
\*\*kwargs. For python 3.5 and earlier, since \*\*kwargs is unordered,
the columns are inserted in alphabetical order at the end of your
DataFrame. Assigning multiple columns within the same ``assign``
is possible, but you cannot reference other columns created within
the same ``assign`` call.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# preserve order for 3.6 and later, but sort by key for 3.5 and earlier
if PY36:
results = results.items()
else:
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# upcast the scalar
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, value.dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
Returns
-------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
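# Illustrative usage sketch: one value is returned per (row, column) pair, so
# both label sequences must be the same length.
# >>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]}, index=list('abc'))
# >>> df.lookup(['a', 'c'], ['y', 'x'])
# array([4, 3])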
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=np.nan, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None)])
def rename(self, *args, **kwargs):
"""Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper, index, columns : dict-like or function, optional
dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
axis : int or str, optional
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : DataFrame
See Also
--------
pandas.DataFrame.rename_axis
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale':[55, 40, 84, 31]})
month sale year
0 1 55 2012
1 4 40 2014
2 7 84 2013
3 10 31 2014
Set the index to become the 'month' column:
>>> df.set_index('month')
sale year
month
1 55 2012
4 40 2014
7 84 2013
10 31 2014
Create a multi-index using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a multi-index using a set of values and a column:
>>> df.set_index([[1, 2, 3, 4], 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = _ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
reset : DataFrame
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.astype(object).values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Return object with labels on given axis omitted where, depending on
`how`, any or all of the data are missing
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Drop the columns where all elements are nan:
>>> df.dropna(axis=1, how='all')
A B D
0 NaN 2.0 0
1 3.0 4.0 1
2 NaN NaN 5
Drop the columns where any of the elements is nan
>>> df.dropna(axis=1, how='any')
D
0 0
1 1
2 5
Drop the rows where all of the elements are nan
(there is no row to drop, so df stays the same):
>>> df.dropna(axis=0, how='all')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Keep only the rows with at least 2 non-na values:
>>> df.dropna(thresh=2)
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self._take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
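# Illustrative usage sketch: restricting ``subset`` changes which columns take
# part in the duplicate check.
# >>> df = pd.DataFrame({'k': [1, 1, 2], 'v': [10, 11, 10]})
# >>> df.drop_duplicates()              # all columns considered; nothing dropped
# >>> df.drop_duplicates(subset=['k'])  # keeps the first row for each k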
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
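# Illustrative usage sketch: the returned boolean Series marks rows according
# to ``keep`` (the default, 'first', leaves the first occurrence unmarked).
# >>> df = pd.DataFrame({'k': [1, 1, 2]})
# >>> df.duplicated()
# 0    False
# 1     True
# 2    False
# dtype: bool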
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
raise ValueError('Cannot sort by column %s in a '
'multi-index you need to explicitly '
'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
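# Illustrative usage sketch: a list of keys sorts lexicographically, and
# ``ascending`` may be a matching list of booleans.
# >>> df = pd.DataFrame({'a': [2, 1, 1], 'b': [0, 2, 1]})
# >>> df.sort_values(by=['a', 'b'], ascending=[True, False])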
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sorting the index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
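# Illustrative usage sketch: with a MultiIndex, ``level`` restricts the sort
# to the given level while ``sort_remaining`` controls the other levels.
# >>> idx = pd.MultiIndex.from_tuples([('b', 2), ('a', 1), ('a', 2)])
# >>> df = pd.DataFrame({'x': [1, 2, 3]}, index=idx)
# >>> df.sort_index(level=0, sort_remaining=True)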
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
DEPRECATED: use :meth:`DataFrame.sort_index`
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
a b c
3 11 c 3
1 10 b 2
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
a b c
4 -1 e 4
0 1 a 1
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None,
try_cast=True):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = {col: f(col) for col in this}
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = {i: f(i) for i, col in enumerate(this.columns)}
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
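    # Illustrative sketch (assumed toy frames): the fill_value masking in
    # _arith_op above is what flex operators such as DataFrame.add expose.
    # A position missing from exactly one side is filled before the op;
    # a position missing from both sides stays NaN.
    @staticmethod
    def _example_combine_frame_fill_value():
        import numpy as np
        import pandas as pd
        left = pd.DataFrame({'a': [1.0, np.nan]})
        right = pd.DataFrame({'a': [10.0, 20.0]})
        return left.add(right, fill_value=0)  # a -> [11.0, 20.0]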
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None, try_cast=True):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
def _combine_series_infer(self, other, func, level=None,
fill_value=None, try_cast=True):
if len(other) == 0:
return self * np.nan
if len(self) == 0:
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
def _combine_match_index(self, other, func, level=None,
fill_value=None, try_cast=True):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None,
fill_value=None, try_cast=True):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index],
try_cast=try_cast)
return self._constructor(new_data)
def _combine_const(self, other, func, errors='raise', try_cast=True):
new_data = self._data.eval(func=func, other=other,
errors=errors,
try_cast=try_cast)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep, try_cast=True):
import pandas.core.computation.expressions as expressions
# unique
if self.columns.is_unique:
def _compare(a, b):
return {col: func(a[col], b[col]) for col in a.columns}
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return {i: func(a.iloc[:, i], b.iloc[:, i])
for i, col in enumerate(a.columns)}
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep, try_cast=True):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep,
try_cast=try_cast)
def _flex_compare_frame(self, other, func, str_rep, level, try_cast=True):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep,
try_cast=try_cast)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
        Perform a column-wise combine with `other`, applying `func` to each
        pair of aligned columns. NaN values are not propagated: if one frame
        is missing a value for a (column, label) pair, it will default to the
        other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
func : function
Function that takes two series as inputs and return a Series or a
scalar
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
Examples
--------
>>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
A B
0 0 3
1 0 3
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
            # if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = np.nan
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# ToDo: This conversion should be handled in
# _maybe_cast_to_datetime but the change affects lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = maybe_cast_to_datetime(arr, new_dtype)
else:
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. Result index columns will be the union of the
respective indexes and columns
Parameters
----------
other : DataFrame
Returns
-------
combined : DataFrame
Examples
--------
df1's values prioritized, use values from df2 to fill holes:
>>> df1 = pd.DataFrame([[1, np.nan]])
>>> df2 = pd.DataFrame([[3, 4]])
>>> df1.combine_first(df2)
0 1
0 1 4.0
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function
"""
import pandas.core.computation.expressions as expressions
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isna(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isna(x_values)
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If ``other`` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if raise_conflict:
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
            # don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Misc methods
def _get_valid_indices(self):
is_valid = self.count(1) > 0
return self.index[is_valid]
@Appender(_shared_docs['valid_index'] % {
'position': 'first', 'klass': 'DataFrame'})
def first_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[0] if len(valid_indices) else None
@Appender(_shared_docs['valid_index'] % {
'position': 'last', 'klass': 'DataFrame'})
def last_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[-1] if len(valid_indices) else None
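    # Illustrative sketch (assumed toy frame): first_valid_index and
    # last_valid_index above return the first/last label whose row contains
    # at least one non-NA value, or None for an empty/all-NA frame.
    @staticmethod
    def _example_valid_index():
        import numpy as np
        import pandas as pd
        df = pd.DataFrame({'a': [np.nan, 1.0, np.nan]}, index=[10, 20, 30])
        return df.first_valid_index(), df.last_valid_index()  # (20, 20)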
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
0 one A 1
1 one B 2
2 one C 3
3 two A 4
4 two B 5
5 two C 6
>>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
        it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
        it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max median min
A C
bar large 5.500000 16 14.5 13
small 5.500000 15 14.5 14
foo large 2.000000 10 9.5 9
small 2.333333 12 11.0 8
Returns
-------
table : DataFrame
See also
--------
DataFrame.pivot : pivot without aggregation that can handle
non-numeric data
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
        --------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.melt import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
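    # Illustrative sketch (assumed toy frame): diff above subtracts the
    # previous row (axis=0) or previous column (axis=1); the first position
    # has nothing to subtract from, so it becomes NaN.
    @staticmethod
    def _example_diff():
        import pandas as pd
        df = pd.DataFrame({'a': [1, 3, 6]})
        return df.diff()  # a -> [NaN, 2.0, 3.0]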
# ----------------------------------------------------------------------
# Function application
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
# TODO: _shallow_copy(subset)?
return self[key]
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
Aggregate these functions across all columns
>>> df.agg(['sum', 'min'])
A B C
sum -0.182253 -0.614014 -2.909534
min -1.916563 -1.460076 -1.568297
Different aggregations per column
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 1.514318
min -1.916563 -1.460076
sum -0.182253 NaN
See also
--------
pandas.DataFrame.apply
pandas.DataFrame.transform
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.aggregate
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
# TODO: flipped axis
result = None
if axis == 0:
try:
result, how = self._aggregate(func, axis=0, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg = aggregate
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
            return value will be guessed by calling func on an empty Series
            (note: while guessing, exceptions raised by func will be
            ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
        DataFrame.transform: only perform transforming type operations
Returns
-------
applied : Series or DataFrame
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
args=args, **kwds)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
0 1 2
0 -0.029638 1.081563 1.280300
1 0.647747 0.831136 -1.549481
2 0.513416 -0.884417 0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
0 1 2
0 -0.03 1.08 1.28
1 0.65 0.83 -1.55
2 0.51 -0.88 0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
        The following, while not recommended ways to generate DataFrames,
        show two methods of constructing a DataFrame from multiple data
        sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently Join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : name, tuple/list of names, or array-like
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Support for specifying index levels as the `on` parameter was added
in version 0.23.0
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
            A key_caller    B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
            A    B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
            A key     B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=idx, columns=cols)
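    # Illustrative sketch (assumed toy frame): with the default
    # method='pearson' the pairwise loop above reduces to the standard
    # correlation coefficient, so perfectly linear columns give +/-1.
    @staticmethod
    def _example_corr():
        import pandas as pd
        df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 6], 'z': [3, 2, 1]})
        return df.corr()  # corr(x, y) == 1.0, corr(x, z) == -1.0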
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
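    # Illustrative sketch (assumed toy frame): cov above normalizes by N-1,
    # so a single column's diagonal entry is its sample variance.
    @staticmethod
    def _example_cov():
        import pandas as pd
        df = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
        return df.cov()  # 1x1 frame holding 1.0, the sample variance of x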
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame, Series
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(other.corr, axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
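    # Illustrative sketch (assumed toy frames): corrwith above aligns the two
    # frames and returns one correlation per shared column; with the default
    # drop=False, columns present in only one frame show up as NaN.
    @staticmethod
    def _example_corrwith():
        import pandas as pd
        df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})
        df2 = pd.DataFrame({'a': [2, 4, 6], 'c': [1, 1, 1]})
        return df1.corrwith(df2)  # a -> 1.0, b -> NaN, c -> NaN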
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notna(frame).sum(axis=axis)
else:
counts = notna(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
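    # Illustrative sketch (assumed toy frame): count above tallies non-NA
    # cells per column (axis=0) or per row (axis=1).
    @staticmethod
    def _example_count():
        import numpy as np
        import pandas as pd
        df = pd.DataFrame({'a': [1.0, np.nan], 'b': [1.0, 2.0]})
        return df.count(), df.count(axis=1)  # columns a=1, b=2; rows 2, 1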
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
result = self.apply(f, reduce=False,
ignore_failures=True)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s"
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
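    # Illustrative sketch (assumed toy frame): idxmin/idxmax above return the
    # index *label* (not the integer position) of the first minimum/maximum
    # in each column.
    @staticmethod
    def _example_idxmax():
        import pandas as pd
        df = pd.DataFrame({'a': [3, 7, 5]}, index=['x', 'y', 'z'])
        return df.idxmin(), df.idxmax()  # a -> 'x', a -> 'y'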
def _get_agg_axis(self, axis_num):
""" let's be explicit about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Adds a row
for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
        freq : string, default None
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
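    # Illustrative sketch (assumed toy frame): to_period above converts a
    # DatetimeIndex to a PeriodIndex (monthly frequency inferred here), and
    # to_timestamp converts back to the start of each period.
    @staticmethod
    def _example_to_period_roundtrip():
        import pandas as pd
        idx = pd.date_range('2017-01-01', periods=3, freq='M')
        df = pd.DataFrame({'a': [1, 2, 3]}, index=idx)
        return df.to_period().to_timestamp(how='start')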
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = accessor.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_objs_combined_axis
if columns is None:
columns = _get_objs_combined_axis(data)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any(getattr(s, 'name', None) is not None for s in data)
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=np.nan)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
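# Illustrative behaviour of the two helpers above (a sketch added for clarity,
# not part of the original module; the values below are assumed inputs):
#   _from_nested_dict({'r1': {'A': 1}, 'r2': {'A': 2}})
#       -> {'A': {'r1': 1, 'r2': 2}}   # outer keys become the per-column index
#                                      # (returned as nested OrderedDicts)
#   _put_str('verylongname', 5) -> 'veryl'
#   _put_str('ab', 5)           -> 'ab   '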
|
bsd-3-clause
| 7,829,162,651,239,753,000
| 35.73283
| 85
| 0.523551
| false
| 4.406942
| false
| false
| false
|
martijnvermaat/rpclib
|
src/rpclib/test/interop/server/httprpc_csv_basic.py
|
1
|
1859
|
#!/usr/bin/env python
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('rpclib.protocol.xml')
logger.setLevel(logging.DEBUG)
from rpclib.application import Application
from rpclib.interface.wsdl import Wsdl11
from rpclib.protocol.csv import OutCsv
from rpclib.protocol.http import HttpRpc
from rpclib.server.wsgi import WsgiApplication
from rpclib.test.interop.server._service import services
httprpc_csv_application = Application(services,
'rpclib.test.interop.server.httprpc.csv', HttpRpc(), OutCsv(), Wsdl11())
if __name__ == '__main__':
try:
from wsgiref.simple_server import make_server
from wsgiref.validate import validator
wsgi_application = WsgiApplication(httprpc_csv_application)
server = make_server('0.0.0.0', 9755, validator(wsgi_application))
logger.info('Starting interop server at %s:%s.' % ('0.0.0.0', 9755))
logger.info('WSDL is at: /?wsdl')
server.serve_forever()
except ImportError:
print("Error: example server code requires Python >= 2.5")
|
lgpl-2.1
| -6,483,269,929,289,989,000
| 37.729167
| 80
| 0.737493
| false
| 3.83299
| false
| false
| false
|
pi2-picole/api
|
vendor/models.py
|
1
|
3896
|
from django.db import models
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.validators import MinValueValidator
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
# Create your models here.
class Popsicle(models.Model):
flavor = models.CharField(max_length=25, default="", unique=True)
price = models.CharField(max_length=4, default='100')
is_active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.flavor)
class Machine(models.Model):
is_active = models.BooleanField(default=True)
label = models.CharField(max_length=25, default="")
seller = models.ForeignKey(User, related_name='machines', null=True)
def __str__(self):
return "{}'s machine: #{} {}".format(self.label, self.id, self.locations.last())
class Location(models.Model):
lat = models.CharField(max_length=15, default="")
lng = models.CharField(max_length=15, default="")
machine = models.ForeignKey(
Machine,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True},
related_name="locations"
)
updated_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "(lat:{},lng:{}) at {}".format(self.lat, self.lng, self.updated_at)
class Stock(models.Model):
popsicle = models.ForeignKey(
Popsicle,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True}
)
amount = models.PositiveSmallIntegerField(default=0)
machine = models.ForeignKey(
Machine,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True},
related_name="stocks"
)
updated_at = models.DateField(auto_now=True)
class Transaction(models.Model):
class Meta:
abstract = True
popsicle = models.ForeignKey(
Popsicle,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True}
)
amount = models.PositiveSmallIntegerField(
default=0, validators=[MinValueValidator(1)]
)
machine = models.ForeignKey(
Machine,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True}
)
timestamp = models.DateTimeField(auto_now_add=True)
class Purchase(Transaction):
lid_was_released = models.BooleanField(default=False)
class PopsicleEntry(Transaction):
pass
class PopsicleRemoval(Transaction):
pass
@receiver(post_save, sender=Purchase)
@receiver(post_save, sender=PopsicleRemoval)
def remove_from_stock(sender, instance, created, **kwargs):
if created:
stock = Stock.objects.get(
popsicle=instance.popsicle, machine=instance.machine
)
stock.amount -= instance.amount
stock.save()
@receiver(post_save, sender=PopsicleEntry)
def add_to_stock(sender, instance, created, **kwargs):
if created:
stock = Stock.objects.get(
popsicle=instance.popsicle, machine=instance.machine
)
stock.amount += instance.amount
stock.save()
@receiver(post_save, sender=Machine)
def create_stocks_for_machine(sender, instance, created, **kwargs):
if created:
stocks = []
for pop in Popsicle.objects.all():
stocks.append(Stock(machine=instance, popsicle=pop, amount=0))
Stock.objects.bulk_create(stocks)
@receiver(post_save, sender=Popsicle)
def create_stocks_for_popsicle(sender, instance, created, **kwargs):
if created:
stocks = []
for machine in Machine.objects.all():
stocks.append(Stock(machine=machine, popsicle=instance, amount=0))
Stock.objects.bulk_create(stocks)
|
mit
| -7,058,885,298,412,033,000
| 29.677165
| 88
| 0.667608
| false
| 3.637722
| false
| false
| false
|
priestc/MultiExplorer
|
local_settings_default.py
|
1
|
1056
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
'TIMEOUT': 500000,
}
}
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
WALLET_SUPPORTED_CRYPTOS = []
WALLET_SUPPORTED_FIATS = ['usd', 'eur', 'cny', 'jpy', 'idr', 'cad', 'gbp']
EXCHANGE_KICK_INTERVAL_MINUTES = 10
EXCHANGE_FEE_PERCENTAGE = 1.5
MAX_MEMO_SIZE_BYTES = 1000
MEMO_SERVER_PRIVATE_MODE = False
MEMO_SERVER_PULL = [
'https://multiexplorer.com/memo/pull'
]
MEMO_SERVER_PUSH = [
'https://multiexplorer.com/memo'
]
DEBUG = False
LOGIN_TRIES = 5
ALLOWED_HOSTS = []
PRICE_INTERVAL_SECONDS = 500
QUANDL_APIKEY = None
IP_FILTER_INTERVAL = {'minutes': 5}
IP_FILTER_HITS = 25
|
mit
| 1,227,060,583,103,808,500
| 21.956522
| 74
| 0.648674
| false
| 2.742857
| false
| true
| false
|
erben22/fib
|
fibserver.py
|
1
|
1239
|
"""Implementation of a web server API that serves up Fibonacci numbers.
TODO: Add some additional error handling:
- Need some handling around the query parameter on the
API. What if it is not supplied for example?
"""
import web
from fibonacci import Fibonacci
urls = (
'/fibonacci', 'FibonacciHandler'
)
"""Definition of the API endpoint to HTTP handler class."""
class FibonacciHandler:
"""Fibonacci endpoint handler. Will expect a parameter to be present
for the sequence to calculate, and if present, will create an
instance of our Fibonacci class to calculate the value and return
it to the caller.
"""
def GET(self):
"""Implementation of the GET handler interface."""
try:
desired_sequence = int(web.input().desired_sequence)
fibonacci = Fibonacci(desired_sequence)
return fibonacci.calculate()
except:
raise web.HTTPError('400 Bad Request', {})
if __name__ == "__main__":
"""Main method that fires up the web application and listens for
prospective requests from various clients.
"""
web.config.debug = False
app = web.application(urls, globals())
app.run()
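# Illustrative client call (a sketch, not part of the original module; it
# assumes web.py's default bind address of 0.0.0.0:8080, since no port is
# configured above):
#   curl 'http://localhost:8080/fibonacci?desired_sequence=10'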
|
mit
| -1,917,998,892,817,449,200
| 28.5
| 73
| 0.652139
| false
| 4.075658
| false
| false
| false
|
esteluk/reinhardt
|
memberinfo/management/commands/populate.py
|
2
|
3209
|
import settings
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from compsoc.memberinfo.models import *
from compsoc.events.models import *
from datetime import date
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list
help = "Populates some simple data into the database"
requires_model_validation = True
def handle_noargs(self, **options):
'''
Inserts the following data into the database:
Terms
Sample Event Types
Sample Locations
Sample Events
Assumes:
syncdb has been run, and there is a user
'''
#u = User.objects.all()[0]
#try:
#mem = Member(user=u,showDetails=True,guest=False)
#mem.save()
#except: pass #user already has member object
# sort of broken :p
#lists = ['announce','discuss','exec','gaming','jobs','socials','techteam']
#for suffix in lists:
#u.mailinglist_set.create(list='compsoc-'+suffix+'@uwcs.co.uk')
terms = [
# from http://www2.warwick.ac.uk/study/termdates/
# 2007/2008
('AU',1,date(2007,10,01)),
('SP',11,date(2008,1,07)),
('SU',21,date(2008,4,21)),
# 2008/2009
('AU',1,date(2008,9,29)),
('SP',11,date(2009,1,5)),
('SU',21,date(2009,4,20)),
# 2009/2010
('AU',1,date(2009,10,5)),
('SP',11,date(2010,1,11)),
('SU',21,date(2010,4,26)),
# 2010/2011
('AU',1,date(2010,10,4)),
('SP',11,date(2011,1,10)),
('SU',21,date(2011,4,27)),
# 2011/2012
('AU',1,date(2011,10,3)),
('SP',11,date(2012,1,9)),
('SU',21,date(2012,4,23)),
# 2012/2013
('AU',1,date(2012,10,1)),
('SP',11,date(2013,1,7)),
('SU',21,date(2013,4,22)),
# 2013/2014
('AU',1,date(2013,9,30)),
('SP',11,date(2014,1,6)),
('SU',21,date(2014,4,23)),
]
for (t,num,d) in terms:
term = Term(start_date=d,start_number=num,length=10,which=t)
term.save()
#for yr in range(2001,2009):
#u.memberjoin_set.create(year=yr)
#is this necessary?
#u.save()
# Event Types
event_types = [
{"name":"LAN Party", "info":"Weekend long sweat off.", "target":"GAM"},
{"name":"Pub Social", "info":"Pub food with CompSoc.", "target":"SCL"},
{"name":"Exec Meeting", "info":"Weekly meeting to discuss stuff.", "target":"SCT"},
]
for et in event_types:
EventType.objects.create(**et)
# locations
locations = [
{"name":"Lib2", "description":"Next to the Cafe Library"},
{"name":"The Phantom Coach", "description":"Up the road from tescos. Nice pub!"},
{"name":"DCS Undergrad Labs", "description":"The building next to the Zeeman building."},
]
for l in locations:
Location.objects.create(**l)
|
agpl-3.0
| 7,771,329,610,080,966,000
| 31.414141
| 101
| 0.511374
| false
| 3.55371
| false
| false
| false
|
McMaNGOS/IIDXCV
|
IIDXCV_video.py
|
1
|
3111
|
from imutils.video import FileVideoStream
from pykeyboard import PyKeyboard
import cv2
import numpy as np
import argparse
import imutils
import time
# argument parser (for video, will use stream/live frames in future)
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
help="Path to video file")
args = vars(ap.parse_args())
# start threaded video stream, give buffer time to fill
print("Initializing video stream...")
fvs = FileVideoStream(args["video"]).start()
time.sleep(1.0)
# class for constructing key objects
class Key:
'Data for each key (detection pixel x/y location, name)'
def __init__(self, x, y, name, keyButton):
# x and y axis of pixel to check for
self.x = x
self.y = y
# name of key (to print in console)
self.name = name
# keyboard button to press
self.keyButton = keyButton
# presses and holds input key, adds key to array
def pressKey(key, pressedArray):
keyboard.press_key(key.keyButton)
pressedArray.append(key)
# releases input key, removes key from array
def releaseKey(key, pressedArray):
keyboard.release_key(key.keyButton)
pressedArray.remove(key)
# define keys (8 for IIDX-style games)
scratch = Key(16, 99, "SCRATCH", 'X')
key1 = Key(70, 99, "KEY 1", 'C')
key2 = Key(104, 99, "KEY 2", 'F')
key3 = Key(135, 99, "KEY 3", 'V')
key4 = Key(169, 99, "KEY 4", 'G')
key5 = Key(199, 99, "KEY 5", 'B')
key6 = Key(232, 99, "KEY 6", 'H')
key7 = Key(263, 99, "KEY 7", 'N')
# put keys in array
keyArray = [scratch, key1, key2, key3, key4, key5, key6, key7]
# initialize keyboard
keyboard = PyKeyboard()
# create background subtractor
bgSub = cv2.createBackgroundSubtractorMOG2()
# array for checking which keys were pressed on a frame
keysPressed = []
# loop over frames from the video file stream
while fvs.more():
# grab current frame from video stream
frame = fvs.read()
# crop the grabbed frame
cropped_frame = frame[0:100, 49:336]
# old crop value (for whole note field):
# cropped_frame = frame[0:484, 49:336]
# apply mask to frame
mask = bgSub.apply(cropped_frame)
# keys to print (underscores by default, for readability) [for debugging]
# printArray = ['_______', '_____', '_____', '_____', '_____', '_____', '_____', '_____']
# initialPrintArray = printArray
# loop through keys in array
for idx, Key in enumerate(keyArray):
# detect pixel at given coordinates
pixel = mask[Key.y, Key.x]
# if white pixel found, pressKey
if pixel == 255 and Key not in keysPressed:
pressKey(Key, keysPressed)
# printArray[idx] = Key.name
# if white pixel not found & key is in keysPressed, releaseKey
if pixel != 255 and Key in keysPressed:
releaseKey(Key, keysPressed)
# print if array is different from default (= key detected)
# if printArray != initialPrintArray:
# print printArray
# display frame with mask
cv2.imshow("output", mask)
cv2.waitKey(1)
# cleanup
cv2.destroyAllWindows()
fvs.stop()
|
gpl-3.0
| -3,277,442,641,834,810,000
| 26.776786
| 93
| 0.647702
| false
| 3.414929
| false
| false
| false
|
SvenVD/rpisurv
|
surveillance/core/worker.py
|
1
|
3534
|
import time
import subprocess
import platform
import os
import shlex
import signal
from util.setuplogging import setup_logging
def worker(name,url,omxplayer_extra_options,coordinates,stopworker,aidx):
"""
Example substituted: ['/usr/bin/omxplayer', '--video_fifo', '1', '--video_queue', '1', '--live', '--timeout', '60', '--aidx', '-1', '-o', 'hdmi', 'rtsp://184.72.239.149:554/vod/mp4:BigBuckBunny_175k.mov', '--win', '960 0 1920 540', '--dbus_name', 'org.mpris.MediaPlayer2.cam_stream2']
"""
def start_subprocess(url,coordinates):
command_line='/usr/bin/omxplayer \
--video_fifo 1 \
--video_queue 1 \
--live \
--timeout 60 \
--aidx ' + str(aidx) + ' \
-o hdmi \
--threshold 0 \
' + ' ' + omxplayer_extra_options + ' ' + url + ' --win ' + '"' + " ".join(map(str,coordinates)) + '"' + ' --dbus_name org.mpris.MediaPlayer2.' + name
command_line_shlex=shlex.split(command_line)
logger.debug("Starting stream " + name + " with commandline " + str(command_line_shlex))
#The other process is just to be able to develop/simulate on a Windows or OSX machine
if platform.system() == "Windows":
proc=subprocess.Popen('echo this is a subprocess started with coordinates ' + str(coordinates) + '& ping 192.168.0.160 /t >NUL', shell=True)
elif platform.system() == "Linux":
proc=subprocess.Popen(command_line_shlex,preexec_fn=os.setsid,stdin=subprocess.PIPE)
else:
proc=subprocess.Popen('echo this is a subprocess started with coordinates ' + str(coordinates), shell=True)
return proc
def stop_subprocess(proc):
#The other process is just to be able to develop on a Windows or OSX machine
if platform.system() == "Windows":
proc.kill()
else:
#This kill the process group so including all children
os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
proc.wait()
#Ctrl-C handling
def signal_sigint_handler(signum,frame):
logger.info("Ctrl C was pressed")
stopworker.value = True
def signal_sigterm_handler(signum,frame):
logger.info("This process was sigtermed")
stopworker.value = True
signal.signal(signal.SIGINT, signal_sigint_handler)
signal.signal(signal.SIGTERM, signal_sigterm_handler)
#Logger setup
logger = setup_logging( "logs/" + name + ".log",__name__)
logger.debug("logger from " + name)
#Start stream and watchdog
attempts=0
proc=start_subprocess(url,coordinates)
logger.debug("stopworker.value = " + name + " " + str(stopworker.value))
while attempts < 100000 and stopworker.value == False:
#logger.debug("stopworker.value in loop = " + name + " " + str(stopworker.value))
if proc.poll() != None:
proc.communicate(input="\n")
proc=start_subprocess(url,coordinates)
attempts = attempts + 1
#Wait for omxplayer to crash, or not
time.sleep(10)
logger.info("Trying to restart " + name +" attempts:" + str(attempts))
else:
attempts=0
time.sleep(0.1)
#If we come to this point, we are instructed to kill this stream
logger.debug("This stream " + name + " is about to be stopped")
stop_subprocess(proc)
logger.info("This stream " + name + " has been stopped")
|
gpl-2.0
| -9,006,318,421,546,378,000
| 44.307692
| 288
| 0.599887
| false
| 3.783726
| false
| false
| false
|
team-vigir/vigir_behaviors
|
vigir_flexbe_states/src/vigir_flexbe_states/calculate_force_torque_calibration_state_test.py
|
1
|
1925
|
# Test the FT Calibration state by calling the python class and doing the calculation here
# Moritz Schappler, schappler@irt.uni-hannover.de, 2015-05
# Institut fuer Regelungstechnik, Universitaet Hannover
# remotedebug
# import pydevd
# pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
# import definitions
from calculate_force_torque_calibration_state import CalculateForceTorqueCalibration
from generate_trajectory_from_txtfile_state import GenerateTrajectoryFromTxtfileState
# initialize rospy and rostime
import rospy
rospy.init_node('calib_test_node', anonymous=True)
# define userdata
class Userdata(object):
def __init__(self):
self.trajectories = []
self.ft_calib_data = []
# Generating the trajectory from text files
# txtfile_name_left_arm = '~/ft_calib/input/l_arm.txt'
# txtfile_name_right_arm = '~/ft_calib/input/r_arm.txt'
txtfile_name_left_arm = '~/ft_calib/input/SI_E065_FT_Calib_Arms_Payload_Left.txt'
txtfile_name_right_arm = '~/ft_calib/input/SI_E065_FT_Calib_Arms_Payload_Right.txt'
transitiontime = 0.5
settlingtime = 0.5
userdata = Userdata()
GTFT = GenerateTrajectoryFromTxtfileState(txtfile_name_left_arm, txtfile_name_right_arm, transitiontime, settlingtime)
GTFT.execute(userdata)
# Calculate the calibration with data recorded with the behaviour
# bag_filename = '/home/schappler/ft_calib/ft_logs/FTCalib.bag'
# bag_filename = '/home/schappler/ft_calib/ft_logs/R05_both_20150426_w_flangue.bag'
# bag_filename = '/home/schappler/IRT_DRC/Experiments/Output/SI_E047_FT_Calib_Arms/S02_20150504_payload_merge.bag'
bag_filename = '~/ft_calib/ft_logs/SI_E065_FT_Calib_Arms_Payload.bag'
calibration_chain = ['left_arm', 'right_arm']
trajectories_command = GTFT._trajectories
CFTC = CalculateForceTorqueCalibration(bag_filename, calibration_chain, settlingtime, trajectories_command)
CFTC.execute(userdata)
print CFTC._ft_calib_data
|
bsd-3-clause
| -4,215,576,476,937,321,000
| 35.320755
| 118
| 0.772468
| false
| 2.912254
| false
| false
| false
|
jamesgk/ufo2ft
|
Lib/ufo2ft/util.py
|
1
|
14489
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
try:
from inspect import getfullargspec as getargspec # PY3
except ImportError:
from inspect import getargspec # PY2
from copy import deepcopy
from fontTools.misc.py23 import unichr
from fontTools import ttLib
from fontTools import subset
from fontTools import unicodedata
from fontTools.feaLib.builder import addOpenTypeFeatures
from fontTools.misc.transform import Identity, Transform
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.transformPen import TransformPen
import logging
logger = logging.getLogger(__name__)
def makeOfficialGlyphOrder(font, glyphOrder=None):
""" Make the final glyph order for 'font'.
If glyphOrder is None, try getting the font.glyphOrder list.
If not explicit glyphOrder is defined, sort glyphs alphabetically.
If ".notdef" glyph is present in the font, force this to always be
the first glyph (at index 0).
"""
if glyphOrder is None:
glyphOrder = getattr(font, "glyphOrder", ())
names = set(font.keys())
order = []
if ".notdef" in names:
names.remove(".notdef")
order.append(".notdef")
for name in glyphOrder:
if name not in names:
continue
names.remove(name)
order.append(name)
order.extend(sorted(names))
return order
class _GlyphSet(dict):
@classmethod
def from_layer(cls, font, layerName=None, copy=False, skipExportGlyphs=None):
"""Return a mapping of glyph names to glyph objects from `font`."""
if layerName is not None:
layer = font.layers[layerName]
else:
layer = font.layers.defaultLayer
if copy:
self = _copyLayer(layer, obj_type=cls)
self.lib = deepcopy(layer.lib)
else:
self = cls((g.name, g) for g in layer)
self.lib = layer.lib
# If any glyphs in the skipExportGlyphs list are used as components, decompose
# them in the containing glyphs...
if skipExportGlyphs:
for glyph in self.values():
if any(c.baseGlyph in skipExportGlyphs for c in glyph.components):
deepCopyContours(self, glyph, glyph, Transform(), skipExportGlyphs)
if hasattr(glyph, "removeComponent"): # defcon
for c in [
component
for component in glyph.components
if component.baseGlyph in skipExportGlyphs
]:
glyph.removeComponent(c)
else: # ufoLib2
glyph.components[:] = [
c
for c in glyph.components
if c.baseGlyph not in skipExportGlyphs
]
# ... and then remove them from the glyph set, if even present.
for glyph_name in skipExportGlyphs:
if glyph_name in self:
del self[glyph_name]
self.name = layer.name if layerName is not None else None
return self
def _copyLayer(layer, obj_type=dict):
# defcon.Glyph doesn't take a name argument, ufoLib2 requires one...
try:
g = next(iter(layer))
except StopIteration: # layer is empty
return obj_type()
cls = g.__class__
if "name" in getargspec(cls.__init__).args:
def newGlyph(name):
return cls(name=name)
else:
def newGlyph(name):
# use instantiateGlyphObject() to keep any custom sub-element classes
# https://github.com/googlefonts/ufo2ft/issues/363
g2 = g.layer.instantiateGlyphObject()
g2.name = name
return g2
# copy everything except unused attributes: 'guidelines', 'note', 'image'
glyphSet = obj_type()
for glyph in layer:
copy = newGlyph(glyph.name)
copy.width = glyph.width
copy.height = glyph.height
copy.unicodes = list(glyph.unicodes)
copy.anchors = [dict(a) for a in glyph.anchors]
copy.lib = deepcopy(glyph.lib)
pointPen = copy.getPointPen()
glyph.drawPoints(pointPen)
glyphSet[glyph.name] = copy
return glyphSet
def deepCopyContours(
glyphSet, parent, composite, transformation, specificComponents=None
):
"""Copy contours from component to parent, including nested components.
specificComponents: an optional list of glyph name strings. If not passed or
None, decompose all components of a glyph unconditionally and completely. If
passed, only completely decompose components whose baseGlyph is in the list.
"""
for nestedComponent in composite.components:
# Because this function works recursively, test at each turn if we are going to
# recurse into a specificComponent. If so, set the specificComponents argument
# to None so we unconditionally decompose the possibly nested component
# completely.
specificComponentsEffective = specificComponents
if specificComponentsEffective:
if nestedComponent.baseGlyph not in specificComponentsEffective:
continue
else:
specificComponentsEffective = None
try:
nestedBaseGlyph = glyphSet[nestedComponent.baseGlyph]
except KeyError:
logger.warning(
"dropping non-existent component '%s' in glyph '%s'",
nestedComponent.baseGlyph,
parent.name,
)
else:
deepCopyContours(
glyphSet,
parent,
nestedBaseGlyph,
transformation.transform(nestedComponent.transformation),
specificComponents=specificComponentsEffective,
)
# Check if there are any contours to copy before instantiating pens.
if composite != parent and len(composite):
if transformation == Identity:
pen = parent.getPen()
else:
pen = TransformPen(parent.getPen(), transformation)
# if the transformation has a negative determinant, it will
# reverse the contour direction of the component
xx, xy, yx, yy = transformation[:4]
if xx * yy - xy * yx < 0:
pen = ReverseContourPen(pen)
for contour in composite:
contour.draw(pen)
def makeUnicodeToGlyphNameMapping(font, glyphOrder=None):
""" Make a unicode: glyph name mapping for this glyph set (dict or Font).
Raises InvalidFontData exception if multiple glyphs are mapped to the
same unicode codepoint.
"""
if glyphOrder is None:
glyphOrder = makeOfficialGlyphOrder(font)
mapping = {}
for glyphName in glyphOrder:
glyph = font[glyphName]
unicodes = glyph.unicodes
for uni in unicodes:
if uni not in mapping:
mapping[uni] = glyphName
else:
from ufo2ft.errors import InvalidFontData
raise InvalidFontData(
"cannot map '%s' to U+%04X; already mapped to '%s'"
% (glyphName, uni, mapping[uni])
)
return mapping
def compileGSUB(featureFile, glyphOrder):
""" Compile and return a GSUB table from `featureFile` (feaLib
FeatureFile), using the given `glyphOrder` (list of glyph names).
"""
font = ttLib.TTFont()
font.setGlyphOrder(glyphOrder)
addOpenTypeFeatures(font, featureFile, tables={"GSUB"})
return font.get("GSUB")
def closeGlyphsOverGSUB(gsub, glyphs):
""" Use the FontTools subsetter to perform a closure over the GSUB table
given the initial `glyphs` (set of glyph names, str). Update the set
in-place adding all the glyph names that can be reached via GSUB
substitutions from this initial set.
"""
subsetter = subset.Subsetter()
subsetter.glyphs = glyphs
gsub.closure_glyphs(subsetter)
def classifyGlyphs(unicodeFunc, cmap, gsub=None):
""" 'unicodeFunc' is a callable that takes a Unicode codepoint and
returns a string denoting some Unicode property associated with the
given character (or None if a character is considered 'neutral').
'cmap' is a dictionary mapping Unicode codepoints to glyph names.
'gsub' is an (optional) fonttools GSUB table object, used to find all
the glyphs that are "reachable" via substitutions from the initial
sets of glyphs defined in the cmap.
Returns a dictionary of glyph sets associated with the given Unicode
properties.
"""
glyphSets = {}
neutralGlyphs = set()
for uv, glyphName in cmap.items():
key = unicodeFunc(uv)
if key is None:
neutralGlyphs.add(glyphName)
else:
glyphSets.setdefault(key, set()).add(glyphName)
if gsub is not None:
if neutralGlyphs:
closeGlyphsOverGSUB(gsub, neutralGlyphs)
for glyphs in glyphSets.values():
s = glyphs | neutralGlyphs
closeGlyphsOverGSUB(gsub, s)
glyphs.update(s - neutralGlyphs)
return glyphSets
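# Illustrative call (a sketch, not part of the original module): classify a
# tiny, assumed cmap by a "decimal digit vs. neutral" predicate, without any
# GSUB closure:
#   classifyGlyphs(lambda uv: "digit" if unichr(uv).isdigit() else None,
#                  {0x31: "one", 0x41: "A"})
#   -> {"digit": {"one"}}   # "A" maps to None, so it stays in the neutral set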
def unicodeInScripts(uv, scripts):
""" Check UnicodeData's ScriptExtension property for unicode codepoint
'uv' and return True if it intersects with the set of 'scripts' provided,
False if it does not intersect.
Return None for 'Common' script ('Zyyy').
"""
sx = unicodedata.script_extension(unichr(uv))
if "Zyyy" in sx:
return None
return not sx.isdisjoint(scripts)
def calcCodePageRanges(unicodes):
""" Given a set of Unicode codepoints (integers), calculate the
corresponding OS/2 CodePage range bits.
This is a direct translation of FontForge implementation:
https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
"""
codepageRanges = set()
chars = [unichr(u) for u in unicodes]
hasAscii = set(range(0x20, 0x7E)).issubset(unicodes)
hasLineart = "┤" in chars
for char in chars:
if char == "Þ" and hasAscii:
codepageRanges.add(0) # Latin 1
elif char == "Ľ" and hasAscii:
codepageRanges.add(1) # Latin 2: Eastern Europe
if hasLineart:
codepageRanges.add(58) # Latin 2
elif char == "Б":
codepageRanges.add(2) # Cyrillic
if "Ѕ" in chars and hasLineart:
codepageRanges.add(57) # IBM Cyrillic
if "╜" in chars and hasLineart:
codepageRanges.add(49) # MS-DOS Russian
elif char == "Ά":
codepageRanges.add(3) # Greek
if hasLineart and "½" in chars:
codepageRanges.add(48) # IBM Greek
if hasLineart and "√" in chars:
codepageRanges.add(60) # Greek, former 437 G
elif char == "İ" and hasAscii:
codepageRanges.add(4) # Turkish
if hasLineart:
codepageRanges.add(56) # IBM turkish
elif char == "א":
codepageRanges.add(5) # Hebrew
if hasLineart and "√" in chars:
codepageRanges.add(53) # Hebrew
elif char == "ر":
codepageRanges.add(6) # Arabic
if "√" in chars:
codepageRanges.add(51) # Arabic
if hasLineart:
codepageRanges.add(61) # Arabic; ASMO 708
elif char == "ŗ" and hasAscii:
codepageRanges.add(7) # Windows Baltic
if hasLineart:
codepageRanges.add(59) # MS-DOS Baltic
elif char == "₫" and hasAscii:
codepageRanges.add(8) # Vietnamese
elif char == "ๅ":
codepageRanges.add(16) # Thai
elif char == "エ":
codepageRanges.add(17) # JIS/Japan
elif char == "ㄅ":
codepageRanges.add(18) # Chinese: Simplified chars
elif char == "ㄱ":
codepageRanges.add(19) # Korean wansung
elif char == "央":
codepageRanges.add(20) # Chinese: Traditional chars
elif char == "곴":
codepageRanges.add(21) # Korean Johab
elif char == "♥" and hasAscii:
codepageRanges.add(30) # OEM Character Set
# TODO: Symbol bit has a special meaning (check the spec), we need
# to confirm if this is wanted by default.
# elif unichr(0xF000) <= char <= unichr(0xF0FF):
# codepageRanges.add(31) # Symbol Character Set
elif char == "þ" and hasAscii and hasLineart:
codepageRanges.add(54) # MS-DOS Icelandic
elif char == "╚" and hasAscii:
codepageRanges.add(62) # WE/Latin 1
codepageRanges.add(63) # US
elif hasAscii and hasLineart and "√" in chars:
if char == "Å":
codepageRanges.add(50) # MS-DOS Nordic
elif char == "é":
codepageRanges.add(52) # MS-DOS Canadian French
elif char == "õ":
codepageRanges.add(55) # MS-DOS Portuguese
if hasAscii and "‰" in chars and "∑" in chars:
codepageRanges.add(29) # Macintosh Character Set (US Roman)
# when no codepage ranges can be enabled, fall back to enabling bit 0
# (Latin 1) so that the font works in MS Word:
# https://github.com/googlei18n/fontmake/issues/468
if not codepageRanges:
codepageRanges.add(0)
return codepageRanges
class _LazyFontName(object):
def __init__(self, font):
self.font = font
def __str__(self):
from ufo2ft.fontInfoData import getAttrWithFallback
return getAttrWithFallback(self.font.info, "postscriptFontName")
def getDefaultMasterFont(designSpaceDoc):
defaultSource = designSpaceDoc.findDefault()
if not defaultSource:
from ufo2ft.errors import InvalidDesignSpaceData
raise InvalidDesignSpaceData(
"Can't find base (neutral) master in DesignSpace document"
)
if not defaultSource.font:
from ufo2ft.errors import InvalidDesignSpaceData
raise InvalidDesignSpaceData(
"DesignSpace source '%s' is missing required 'font' attribute"
% getattr(defaultSource, "name", "<Unknown>")
)
return defaultSource.font
|
mit
| 224,002,478,724,119,800
| 35.559494
| 87
| 0.6118
| false
| 4.102557
| false
| false
| false
|
whosonfirst/py-mapzen-whosonfirst-pip
|
mapzen/whosonfirst/pip/utils.py
|
1
|
4464
|
import mapzen.whosonfirst.pip
import mapzen.whosonfirst.uri
import mapzen.whosonfirst.placetypes
import shapely.geometry
import logging
import requests
import json
def reverse_geocoordinates(feature):
logging.warning("mapzen.whosonfirst.pip.utils.reverse_geocoordinates has been deprecated, you should use mapzen.whosonfirst.utils.reverse_geocoordinates instead")
props = feature['properties']
lat = props.get('reversegeo:latitude', None)
lon = props.get('reversegeo:longitude', None)
if not lat or not lon:
lat = props.get('lbl:latitude', None)
lon = props.get('lbl:longitude', None)
if not lat or not lon:
lat = props.get('geom:latitude', None)
lon = props.get('geom:longitude', None)
if not lat or not lon:
shp = shapely.geometry.asShape(feature['geometry'])
coords = shp.centroid
lat = coords.y
lon = coords.x
return lat, lon
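# Illustrative fallback order (a sketch, not part of the original module;
# the feature below is an assumed minimal GeoJSON-like dict):
#   reverse_geocoordinates({"properties": {"lbl:latitude": 18.48361,
#                                          "lbl:longitude": -77.53057},
#                           "geometry": {}})
#   -> (18.48361, -77.53057)
# i.e. 'reversegeo:*' wins over 'lbl:*', which wins over 'geom:*', before
# falling back to the shapely centroid of the geometry.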
# please rename me
# test with 18.48361, -77.53057
def whereami(feature, **kwargs):
raise Exception, "Please finish me"
def append_hierarchy_and_parent_pip(feature, **kwargs):
return append_hierarchy_and_parent(feature, **kwargs)
# https://github.com/whosonfirst/py-mapzen-whosonfirst-pip-utils/blob/f1ec12d3ffefd35768473aebb5e6d3d19e8d5172/mapzen/whosonfirst/pip/utils/__init__.py
def append_hierarchy_and_parent(feature, **kwargs):
props = feature['properties']
placetype = props['wof:placetype']
wofid = props.get('wof:id', None)
lat, lon = reverse_geocoordinates(feature)
parents = get_reverse_geocoded(lat, lon, placetype, **kwargs)
hierarchy = get_hierarchy(parents, wofid, placetype, **kwargs)
parent_id = get_parent_id(parents)
if len(parents) == 0:
logging.debug("Failed to reverse geocode any parents for %s, %s" % (lat, lon))
elif len(parents) > 1:
logging.debug("Multiple reverse geocoding possibilities %s, %s" % (lat, lon))
props['wof:parent_id'] = parent_id
props['wof:hierarchy'] = hierarchy
feature['properties'] = props
return True
def get_hierarchy(reverse_geocoded, wofid, placetype, **kwargs):
_hiers = []
data_root = kwargs.get('data_root', None)
remote_data_root = kwargs.get('remote_data_root', 'https://whosonfirst.mapzen.com/data/')
for r in reverse_geocoded:
id = r['Id']
if data_root != None:
pf = mapzen.whosonfirst.utils.load(data_root, id)
else:
rsp = requests.get(remote_data_root + mapzen.whosonfirst.uri.id2relpath(id))
pf = json.loads(rsp.content)
pp = pf['properties']
ph = pp['wof:hierarchy']
if len(ph) == 0:
logging.warning("parent (%s) returned an empty hierarchy so making a truncated mock" % id)
pt = pp['wof:placetype']
pt = "%s_id" % pt
ph = [ {pt: id} ]
for h in ph:
if wofid:
h[ "%s_id" % placetype ] = wofid
_hiers.append(h)
return _hiers
def get_parent_id(reverse_geocoded):
parent_id = -1
if len(reverse_geocoded) == 1:
parent_id = reverse_geocoded[0]['Id']
return parent_id
def get_reverse_geocoded(lat, lon, placetype, **kwargs):
# see also : https://github.com/whosonfirst/go-whosonfirst-pip#wof-pip-server
# if a user-specified pip_server is passed, use that; otherwise use pip_proxy
pip_server = kwargs.get('pip_server', None)
if not pip_server:
pip_proxy = mapzen.whosonfirst.pip.proxy()
pt = mapzen.whosonfirst.placetypes.placetype(placetype)
_rsp = []
parents = pt.parents()
logging.debug("feature is a %s, parents are %s" % (placetype, parents))
for parent in parents:
parent = str(parent)
# TO DO: some kind of 'ping' to make sure the server is actually
# there... (20151221/thisisaaronland)
logging.debug("reverse geocode for %s w/ %s,%s" % (parent, lat, lon))
try:
if pip_server:
rsp = pip_server.reverse_geocode(lat, lon, placetype=parent, exclude=["superseded", "deprecated"])
else:
rsp = pip_proxy.reverse_geocode(parent, lat, lon, exclude=["superseded", "deprecated"])
except Exception, e:
logging.debug("failed to reverse geocode %s @%s,%s" % (parent, lat, lon))
continue
if len(rsp):
_rsp = rsp
break
return _rsp
|
bsd-3-clause
| -1,172,968,554,941,506,600
| 27.615385
| 166
| 0.628136
| false
| 3.444444
| false
| false
| false
|
arunkgupta/gramps
|
gramps/plugins/lib/maps/messagelayer.py
|
1
|
5240
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011-2012 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
from gi.repository import GObject
import operator
from math import *
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("maps.messagelayer")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
import cairo
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# osmGpsMap
#
#-------------------------------------------------------------------------
try:
from gi.repository import OsmGpsMap as osmgpsmap
except:
raise
class MessageLayer(GObject.GObject, osmgpsmap.MapLayer):
"""
This is the layer used to display messages over the map
"""
def __init__(self):
"""
Initialize the layer
"""
GObject.GObject.__init__(self)
self.message = []
self.color = "black"
self.font = "Arial"
self.size = 18
#families = font_map.list_families()
def clear_messages(self):
"""
reset the layer attributes.
"""
self.message = []
def clear_font_attributes(self):
"""
reset the font attributes.
"""
self.color = "black"
self.font = "Arial"
self.size = 18
def set_font_attributes(self, font, size, color):
"""
Set the font color, size and name
"""
if color is not None:
self.color = color
if font is not None:
self.font = font
if size is not None:
self.size = size
def add_message(self, message):
"""
Add a message
"""
self.message.append(message)
def do_draw(self, gpsmap, ctx):
"""
Draw the two extreme dates
"""
ctx.select_font_face(self.font,
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
ctx.set_font_size(int(self.size))
color = Gdk.color_parse(self.color)
ctx.set_source_rgba(float(color.red / 65535.0),
float(color.green / 65535.0),
float(color.blue / 65535.0),
0.9) # transparency
coord_x = 100
coord_y = int(self.size) # Show the first line under the zoom button
d_width = gpsmap.get_allocation().width
gpsmap.set_size_request(300,400)
d_width -= 100
for line in self.message:
line_to_print = line
(x_bearing, y_bearing, width, height, x_advance, y_advance) = ctx.text_extents(line_to_print)
while ( width > d_width ):
line_length = len(line_to_print)
character_length = int(width/line_length) + 1
max_length = int(d_width / character_length) - 1
while line_to_print[max_length] != ' ':
max_length -= 1 # cut the line at a new word
ctx.move_to(coord_x, coord_y)
ctx.show_text(line_to_print[:max_length])
line_to_print = line_to_print[max_length:]
(x_bearing, y_bearing, width, height, x_advance, y_advance) = ctx.text_extents(line_to_print)
coord_y += int(self.size) # calculate the next line position
ctx.move_to(coord_x, coord_y)
ctx.show_text(line_to_print)
coord_y += int(self.size) # calculate the next line position
ctx.stroke()
def do_render(self, gpsmap):
"""
render the layer
"""
pass
def do_busy(self):
"""
set the layer busy
"""
return False
def do_button_press(self, gpsmap, gdkeventbutton):
"""
When we press a button.
"""
return False
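# Illustrative use from a map view (a sketch, not part of the original module;
# 'osm' stands for an assumed osmgpsmap.Map instance such as the one Gramps'
# geography views hold):
#   layer = MessageLayer()
#   layer.set_font_attributes("Arial", 18, "red")
#   layer.add_message("Active filter: people born between 1800 and 1900")
#   osm.layer_add(layer)   # the layer's do_draw() then runs on each redraw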
GObject.type_register(MessageLayer)
|
gpl-2.0
| -414,330,747,547,846,660
| 29.823529
| 109
| 0.489885
| false
| 4.337748
| false
| false
| false
|